author     davidovski <david@davidovski.xyz>  2022-06-27 23:09:07 +0100
committer  davidovski <david@davidovski.xyz>  2022-06-27 23:09:07 +0100
commit     f6332a43c35387c4a2dea1746be5fd092890ae0e (patch)
tree       d6599f63de04096f3fc21a98e0b3bb39d55a3531
parent     f13e0cac13f90f7f57bce3b26b2e6383de6e4ad2 (diff)
added lf and iptables
-rw-r--r--  repo/augeas/acf.aug  7
-rw-r--r--  repo/augeas/augeas.xibuild  44
-rw-r--r--  repo/augeas/awall.aug  9
-rw-r--r--  repo/augeas/fix-test.patch  13
-rw-r--r--  repo/bridge-utils/bridge-utils.xibuild  32
-rw-r--r--  repo/bridge-utils/fix-PATH_MAX-on-ppc64le.patch  26
-rw-r--r--  repo/ddcutil/ddcutil.xibuild  25
-rw-r--r--  repo/dmidecode/dmidecode.xibuild  23
-rw-r--r--  repo/dnsmasq/0000-fix-heap-overflow-in-dns-replies.patch  66
-rw-r--r--  repo/dnsmasq/0001-Retry-on-interrupted-error-in-tftp.patch  27
-rw-r--r--  repo/dnsmasq/0002-Add-safety-checks-to-places-pointed-by-Coverity.patch  45
-rw-r--r--  repo/dnsmasq/0003-Small-safeguard-to-unexpected-data.patch  30
-rw-r--r--  repo/dnsmasq/0004-Fix-bunch-of-warnings-in-auth.c.patch  80
-rw-r--r--  repo/dnsmasq/0005-Fix-few-coverity-warnings-in-lease-tools.patch  92
-rw-r--r--  repo/dnsmasq/0006-Fix-coverity-formats-issues-in-blockdata.patch  23
-rw-r--r--  repo/dnsmasq/0007-Retry-dhcp6-ping-on-interrupts.patch  23
-rw-r--r--  repo/dnsmasq/0008-Fix-coverity-warnings-on-dbus.patch  84
-rw-r--r--  repo/dnsmasq/0009-Address-coverity-issues-detected-in-util.c.patch  58
-rw-r--r--  repo/dnsmasq/0010-Fix-coverity-detected-issues-in-option.c.patch  135
-rw-r--r--  repo/dnsmasq/0011-Fix-coverity-detected-issue-in-radv.c.patch  23
-rw-r--r--  repo/dnsmasq/0012-Fix-coverity-detected-issues-in-cache.c.patch  23
-rw-r--r--  repo/dnsmasq/0013-Fix-coverity-issues-detected-in-domain-match.c.patch  60
-rw-r--r--  repo/dnsmasq/0014-Fix-coverity-detected-issues-in-dnsmasq.c.patch  69
-rw-r--r--  repo/dnsmasq/0015-Fix-coverity-issues-in-dnssec.c.patch  35
-rw-r--r--  repo/dnsmasq/0020-Fix-crash-after-re-reading-empty-resolv.conf.patch  38
-rw-r--r--  repo/dnsmasq/CVE-2022-0934.patch  189
-rw-r--r--  repo/dnsmasq/config.h.patch  12
-rw-r--r--  repo/dnsmasq/dnsmasq-dnssec.pre-install  6
-rw-r--r--  repo/dnsmasq/dnsmasq-dnssec.pre-upgrade  6
-rw-r--r--  repo/dnsmasq/dnsmasq.conf.patch  38
-rw-r--r--  repo/dnsmasq/dnsmasq.confd  22
-rw-r--r--  repo/dnsmasq/dnsmasq.initd  151
-rw-r--r--  repo/dnsmasq/dnsmasq.pre-install  6
-rw-r--r--  repo/dnsmasq/dnsmasq.pre-upgrade  6
-rw-r--r--  repo/dnsmasq/dnsmasq.xibuild  64
-rw-r--r--  repo/docbook2x/01_fix_static_datadir_evaluation.patch  19
-rw-r--r--  repo/docbook2x/02_fix_418703_dont_use_abbreviated_sfnet_address.patch  27
-rw-r--r--  repo/docbook2x/03_fix_420153_filename_whitespace_handling.patch  43
-rw-r--r--  repo/docbook2x/04_fix_442782_preprocessor_declaration_syntax.patch  90
-rw-r--r--  repo/docbook2x/05_fix_439214_error_on_missing_refentry.patch  33
-rw-r--r--  repo/docbook2x/06_fix_man_typo.patch  24
-rw-r--r--  repo/docbook2x/docbook2x.xibuild  39
-rw-r--r--  repo/electrum/0001-apk-add-instead-of-apt-get-install.patch  22
-rw-r--r--  repo/electrum/electrum.xibuild  31
-rw-r--r--  repo/freeciv/freeciv.xibuild  31
-rw-r--r--  repo/glfw/glfw.xibuild  23
-rw-r--r--  repo/i2c-tools/i2c-tools.xibuild  31
-rw-r--r--  repo/iptables/ebtables.confd  15
-rw-r--r--  repo/iptables/ebtables.initd  99
-rw-r--r--  repo/iptables/ip6tables.confd  14
-rw-r--r--  repo/iptables/iptables.confd  14
-rw-r--r--  repo/iptables/iptables.initd  135
-rw-r--r--  repo/iptables/iptables.xibuild  59
-rw-r--r--  repo/iptables/use-sh-iptables-apply.patch  39
-rw-r--r--  repo/lf/lf.xibuild  33
-rw-r--r--  repo/libmnl/libmnl.xibuild  36
-rw-r--r--  repo/libmnl/musl-fix-headers.patch  13
-rw-r--r--  repo/libnftnl/libnftnl.xibuild  31
-rw-r--r--  repo/libsecp256k1/libsecp256k1.xibuild  28
-rw-r--r--  repo/libtheora/automake.patch  11
-rw-r--r--  repo/libtheora/enc.patch  11
-rw-r--r--  repo/libtheora/fix-mmx.patch  31
-rw-r--r--  repo/libtheora/fix-timeb.patch  75
-rw-r--r--  repo/libtheora/libtheora-flags.patch  14
-rw-r--r--  repo/libvirt/libvirt-6.0.0-fix_paths_in_libvirt-guests_sh.patch  35
-rw-r--r--  repo/libvirt/libvirt-guests.confd  68
-rw-r--r--  repo/libvirt/libvirt-guests.initd  237
-rw-r--r--  repo/libvirt/libvirt.confd  20
-rw-r--r--  repo/libvirt/libvirt.initd  40
-rwxr-xr-x  repo/libvirt/libvirt.post-install  5
-rw-r--r--  repo/libvirt/libvirt.xibuild  54
-rw-r--r--  repo/libvirt/musl-fix-includes.patch  12
-rw-r--r--  repo/libvirt/stderr-fix.patch  13
-rw-r--r--  repo/libvirt/virtlockd.initd  24
-rw-r--r--  repo/libvirt/virtlogd.initd  24
-rw-r--r--  repo/lxc/lxc.confd  10
-rw-r--r--  repo/lxc/lxc.initd  163
-rw-r--r--  repo/lxc/lxc.xibuild  42
-rw-r--r--  repo/maven/maven.xibuild  39
-rw-r--r--  repo/netcf/netcf.xibuild  26
-rw-r--r--  repo/npm/dont-check-for-last-version.patch  15
-rw-r--r--  repo/npm/npm.xibuild  89
-rw-r--r--  repo/npm/npmrc  6
-rw-r--r--  repo/perl-path-tiny/perl-path-tiny.xibuild  26
-rw-r--r--  repo/perl-xml-namespacesupport/perl-xml-namespacesupport.xibuild  24
-rw-r--r--  repo/perl-xml-sax-base/perl-xml-sax-base.xibuild  24
-rw-r--r--  repo/perl-xml-sax/perl-xml-sax.xibuild  25
-rw-r--r--  repo/perl-xml-xpath/perl-xml-xpath.xibuild  24
-rw-r--r--  repo/poetry/fix-packaging-tags.patch  97
-rw-r--r--  repo/poetry/poetry.xibuild  35
-rw-r--r--  repo/protobuf/protobuf.xibuild  38
-rw-r--r--  repo/protobuf/ruby-fix-cflags.patch  16
-rw-r--r--  repo/protobuf/skip-failing-tests.patch  70
-rw-r--r--  repo/protobuf/trim-rakefile.patch  74
-rw-r--r--  repo/python-aiohttp-socks/python-aiohttp-socks.xibuild  17
-rw-r--r--  repo/python-aiohttp/python-aiohttp.xibuild  17
-rw-r--r--  repo/python-aiorpcx/python-aiorpcx.xibuild  17
-rw-r--r--  repo/python-aiosignal/python-aiosignal.xibuild  17
-rw-r--r--  repo/python-atomicwrites/python-atomicwrites.xibuild  17
-rw-r--r--  repo/python-attr/python-attr.xibuild  17
-rw-r--r--  repo/python-attrs/python-attrs.xibuild  17
-rw-r--r--  repo/python-bitstring/python-bitstring.xibuild  17
-rw-r--r--  repo/python-charset-normalizer/python-charset-normalizer.xibuild  17
-rw-r--r--  repo/python-dnspython/python-dnspython.xibuild  17
-rw-r--r--  repo/python-ecdsa/python-ecdsa.xibuild  17
-rw-r--r--  repo/python-exceptiongroup/python-exceptiongroup.xibuild  26
-rw-r--r--  repo/python-exceptiongroup/use-flit-core.patch  38
-rw-r--r--  repo/python-flit-core/python-flit-core.xibuild  27
-rw-r--r--  repo/python-frozenlist/python-frozenlist.xibuild  17
-rw-r--r--  repo/python-future/python-future.xibuild  17
-rw-r--r--  repo/python-hypothesis/python-hypothesis.xibuild  17
-rw-r--r--  repo/python-idna-ssl/python-idna-ssl.xibuild  17
-rw-r--r--  repo/python-importlib-metadata/python-importlib-metadata.xibuild  19
-rw-r--r--  repo/python-iniconfig/python-iniconfig.xibuild  17
-rw-r--r--  repo/python-lark/python-lark.xibuild  17
-rw-r--r--  repo/python-logbook/python-logbook.xibuild  19
-rw-r--r--  repo/python-matrix-nio/python-matrix-nio.xibuild  31
-rw-r--r--  repo/python-nio/python-nio.xibuild  20
-rw-r--r--  repo/python-openssl/python-openssl.xibuild  18
-rw-r--r--  repo/python-pillow/python-pillow.xibuild  17
-rw-r--r--  repo/python-ply/python-ply.xibuild  17
-rw-r--r--  repo/python-poetry-core/python-poetry-core.xibuild  30
-rw-r--r--  repo/python-py/python-py.xibuild  17
-rw-r--r--  repo/python-pycryptodomex/python-pycryptodomex.xibuild  17
-rw-r--r--  repo/python-pyopenssl/python-pyopenssl.xibuild  17
-rw-r--r--  repo/python-pyqt-builder/python-pyqt-builder.xibuild  17
-rw-r--r--  repo/python-pyqt5-sip/python-pyqt5-sip.xibuild  17
-rw-r--r--  repo/python-pyqt5/python-pyqt5.xibuild  23
-rw-r--r--  repo/python-pyqtwebengine/python-pyqtwebengine.xibuild  26
-rw-r--r--  repo/python-pyrsistent/python-pyrsistent.xibuild  27
-rw-r--r--  repo/python-qrcode/python-qrcode.xibuild  17
-rw-r--r--  repo/python-regex/python-regex.xibuild  17
-rw-r--r--  repo/python-sip/python-sip.xibuild  17
-rw-r--r--  repo/python-sortedcontainers/python-sortedcontainers.xibuild  17
-rw-r--r--  repo/python-sphinx-rtd-theme/python-sphinx-rtd-theme.xibuild  17
-rw-r--r--  repo/python-testpath/python-testpath.xibuild  23
-rw-r--r--  repo/python-tomlkit/python-tomlkit.xibuild  17
-rw-r--r--  repo/python-webcolors/python-webcolors.xibuild  17
-rw-r--r--  repo/qt5-qtwebchannel/qt5-qtwebchannel.xibuild  34
-rw-r--r--  repo/qt5-qtwebengine/0001-pretend-to-stay-at-5.15.3.patch  8
-rw-r--r--  repo/qt5-qtwebengine/0010-chromium-musl-Match-syscalls-to-match-musl.patch  44
-rw-r--r--  repo/qt5-qtwebengine/default-pthread-stacksize.patch  23
-rw-r--r--  repo/qt5-qtwebengine/ffmpeg5.patch  151
-rw-r--r--  repo/qt5-qtwebengine/fix-chromium-build.patch  79
-rw-r--r--  repo/qt5-qtwebengine/musl-hacks.patch  13
-rw-r--r--  repo/qt5-qtwebengine/musl-sandbox.patch  181
-rw-r--r--  repo/qt5-qtwebengine/nasm.patch  13
-rw-r--r--  repo/qt5-qtwebengine/qt-chromium-python3.patch  1752
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-crashpad.patch  13
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-dispatch_to_musl.patch  103
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-elf-arm.patch  13
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-execinfo.patch  108
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-mallinfo.patch  43
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-off_t.patch  10
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-pread-pwrite.patch  20
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-pvalloc.patch  14
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-resolve.patch  61
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-siginfo_t.patch  18
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-stackstart.patch  22
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-sysreg-for__WORDSIZE.patch  14
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-thread-stacksize.patch  26
-rw-r--r--  repo/qt5-qtwebengine/qt-musl-tid-caching.patch  81
-rw-r--r--  repo/qt5-qtwebengine/qt5-qtwebengine.xibuild  75
-rw-r--r--  repo/qt5-qtwebengine/remove-glibc-check.patch  78
-rw-r--r--  repo/qt5-qtwebengine/sndio.patch  142
-rw-r--r--  repo/qt5-qtwebengine/support-python3.patch  158
-rw-r--r--  repo/qt5-qtwebsockets/qt5-qtwebsockets.xibuild  31
-rw-r--r--  repo/raylib/raylib.xibuild  26
-rw-r--r--  repo/rpcsvc-proto/rpcsvc-proto.xibuild  23
-rw-r--r--  repo/ruby-rake/ruby-rake.xibuild  34
-rw-r--r--  repo/tcsh/001-sysmalloc.patch  15
-rw-r--r--  repo/tcsh/6974bc35a5cda6eab748e364bd76a860ca66968b.patch  22
-rw-r--r--  repo/tcsh/csh.cshrc  96
-rw-r--r--  repo/tcsh/csh.login  71
-rw-r--r--  repo/tcsh/tcsh.post-install  4
-rw-r--r--  repo/tcsh/tcsh.post-upgrade  4
-rw-r--r--  repo/tcsh/tcsh.pre-deinstall  4
-rw-r--r--  repo/tcsh/tcsh.xibuild  52
-rw-r--r--  repo/virt-manager/fix-latest-libvirt-xml-output.patch  108
-rw-r--r--  repo/virt-manager/tests-remove-sgio-unfiltered.patch  65
-rw-r--r--  repo/virt-manager/virt-manager.xibuild  30
-rw-r--r--  repo/weechat-matrix/weechat-matrix.post-install  13
-rw-r--r--  repo/weechat-matrix/weechat-matrix.xibuild  38
-rw-r--r--  skip/ceph/10-musl-fixes.patch  15
-rw-r--r--  skip/ceph/11-dump_time_header_impl.patch  34
-rw-r--r--  skip/ceph/11-parse_rfc1123_alt.patch  53
-rw-r--r--  skip/ceph/11-s3_expiration_header.patch  30
-rw-r--r--  skip/ceph/12-package.json-resolutions.patch  31
-rw-r--r--  skip/ceph/20-pci.patch  63
-rw-r--r--  skip/ceph/30-32bit_fix.patch.noauto  110
-rw-r--r--  skip/ceph/30-cypress.patch.noauto  14
-rw-r--r--  skip/ceph/30-ubuntu-32bit-fixes.patch.noauto  137
-rw-r--r--  skip/ceph/31-32bit_fix_tests.patch.noauto  66
-rw-r--r--  skip/ceph/32-PurgeQueue.cc-cast.patch  85
-rw-r--r--  skip/ceph/32-upstream32bit.patch  92
-rw-r--r--  skip/ceph/32-upstream32bitcleanup.patch  143
-rw-r--r--  skip/ceph/35-fix_ErasureCodeShec.patch  17
-rw-r--r--  skip/ceph/37-fix_tests.patch  86
-rw-r--r--  skip/ceph/42-no-virtualenvs.patch  71
-rw-r--r--  skip/ceph/43-LogClock.h.patch  18
-rw-r--r--  skip/ceph/44-aarch64-erasure.patch  129
-rw-r--r--  skip/ceph/44-cmake-buildtype.patch  38
-rw-r--r--  skip/ceph/44-missing-include.patch  16
-rw-r--r--  skip/ceph/44-staticcast.patch  13
-rw-r--r--  skip/ceph/ceph-user.pre-install  5
-rw-r--r--  skip/ceph/ceph.confd  17
-rw-r--r--  skip/ceph/ceph.initd  118
-rw-r--r--  skip/ceph/ceph.xibuild  120
-rw-r--r--  skip/dotnet-sdk/dotnet-sdk.xibuild  17
-rw-r--r--  skip/zynaddsubfx/cmake-build-type-none.patch  47
-rw-r--r--  skip/zynaddsubfx/fix-bogus-strstr.patch  16
-rw-r--r--  skip/zynaddsubfx/fix-memset.patch  11
-rw-r--r--  skip/zynaddsubfx/zynaddsubfx.xibuild  51
213 files changed, 10486 insertions, 0 deletions
diff --git a/repo/augeas/acf.aug b/repo/augeas/acf.aug
new file mode 100644
index 0000000..377c0a8
--- /dev/null
+++ b/repo/augeas/acf.aug
@@ -0,0 +1,7 @@
+(* Copyright (C) 2016 Kaarle Ritvanen *)
+
+module Acf =
+
+autoload xfm
+
+let xfm = transform IniFile.lns_loose (incl "/etc/acf/acf.conf")
diff --git a/repo/augeas/augeas.xibuild b/repo/augeas/augeas.xibuild
new file mode 100644
index 0000000..2064482
--- /dev/null
+++ b/repo/augeas/augeas.xibuild
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+NAME="augeas"
+DESC="A configuration editing tool"
+
+MAKEDEPS=" autoconf automake libxml2 readline libtool"
+
+PKG_VER=1.12.0
+SOURCE="http://download.augeas.net/augeas-$PKG_VER.tar.gz"
+
+ADDITIONAL="
+acf.aug
+awall.aug
+fix-test.patch
+"
+
+prepare() {
+ apply_patches
+ autoreconf -f -i
+}
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --enable-static \
+ --enable-shared \
+ --disable-gnulib-tests
+ make
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+ rm -rf "$PKG_DEST"/usr/lib/charset.alias
+ # Upstream packaging mistake, this should never have been installed
+ rm -f "$PKG_DEST"/usr/bin/dump
+
+ local lens
+ for lens in acf awall; do
+ install -m 644 "$BUILD_ROOT"/$lens.aug "$PKG_DEST"/usr/share/augeas/lenses
+ done
+}
diff --git a/repo/augeas/awall.aug b/repo/augeas/awall.aug
new file mode 100644
index 0000000..e4432d2
--- /dev/null
+++ b/repo/augeas/awall.aug
@@ -0,0 +1,9 @@
+(* Copyright (C) 2018 Kaarle Ritvanen *)
+
+module Awall =
+
+autoload xfm
+
+let xfm = transform Json.lns (
+ incl "/etc/awall/*.json" . incl "/etc/awall/*/*.json"
+)
diff --git a/repo/augeas/fix-test.patch b/repo/augeas/fix-test.patch
new file mode 100644
index 0000000..540aeba
--- /dev/null
+++ b/repo/augeas/fix-test.patch
@@ -0,0 +1,13 @@
+diff --git a/tests/test-preserve.sh b/tests/test-preserve.sh
+index 40b8689..f3e9b87 100755
+--- a/tests/test-preserve.sh
++++ b/tests/test-preserve.sh
+@@ -40,7 +40,7 @@ if [ $? != 0 ] ; then
+ exit 1
+ fi
+
+-act_group=$(ls -l $hosts | sed -e 's/ */ /g' | cut -d ' ' -f 4)
++act_group=$(stat -c "%G" $hosts)
+ act_mode=$(ls -l $hosts | cut -b 1-10)
+ if [ $selinux = yes ] ; then
+ act_con=$(stat --format=%C $hosts | cut -d ':' -f 3)
diff --git a/repo/bridge-utils/bridge-utils.xibuild b/repo/bridge-utils/bridge-utils.xibuild
new file mode 100644
index 0000000..cc94b7b
--- /dev/null
+++ b/repo/bridge-utils/bridge-utils.xibuild
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+NAME="bridge-utils"
+DESC="Tools for configuring the Linux kernel 802.1d Ethernet Bridge"
+
+MAKEDEPS="autoconf linux-headers"
+
+PKG_VER=1.7.1
+SOURCE="https://git.kernel.org/pub/scm/network/bridge/bridge-utils.git/snapshot/bridge-utils-$PKG_VER.tar.gz"
+
+ADDITIONAL="
+fix-PATH_MAX-on-ppc64le.patch
+"
+
+prepare() {
+ apply_patches
+ autoconf
+}
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --mandir=/usr/share/man \
+ --libdir=/usr/lib \
+ --includedir=/usr/include
+ make
+}
+
+package() {
+ make install DESTDIR="$PKG_DEST"
+}
+
diff --git a/repo/bridge-utils/fix-PATH_MAX-on-ppc64le.patch b/repo/bridge-utils/fix-PATH_MAX-on-ppc64le.patch
new file mode 100644
index 0000000..3fa75b9
--- /dev/null
+++ b/repo/bridge-utils/fix-PATH_MAX-on-ppc64le.patch
@@ -0,0 +1,26 @@
+Author: Milan P. Stanić <mps@arvanta.net>
+Date: Sun May 30 07:40:11 2021 +0000
+
+Fix missing PATH_MAX on ppc64le
+
+--- a/libbridge/libbridge_devif.c 2021-05-30 07:59:46.533574878 +0000
++++ b/libbridge/libbridge_devif.c 2021-05-30 08:01:09.964036452 +0000
+@@ -24,6 +24,7 @@
+ #include <string.h>
+ #include <dirent.h>
+ #include <fcntl.h>
++#include <limits.h>
+
+ #include "libbridge.h"
+ #include "libbridge_private.h"
+
+--- a/libbridge/libbridge_init.c 2021-05-30 08:00:15.983737797 +0000
++++ b/libbridge/libbridge_init.c 2021-05-30 08:00:49.573923635 +0000
+@@ -24,6 +24,7 @@
+ #include <dirent.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
++#include <limits.h>
+
+ #include "libbridge.h"
+ #include "libbridge_private.h"
diff --git a/repo/ddcutil/ddcutil.xibuild b/repo/ddcutil/ddcutil.xibuild
new file mode 100644
index 0000000..457c922
--- /dev/null
+++ b/repo/ddcutil/ddcutil.xibuild
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+NAME="ddcutil"
+DESC="Query and change Linux monitor settings using DDC/CI and USB"
+
+MAKEDEPS="linux-headers autoconf automake libtool eudev libusb i2c-tools libxrandr glib kmod"
+
+PKG_VER=1.2.2
+SOURCE="https://github.com/rockowitz/ddcutil/archive/v$PKG_VER.tar.gz"
+
+prepare() {
+ NOCONFIGURE=1 ./autogen.sh
+}
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --mandir=/usr/share/man
+ make
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+}
+
diff --git a/repo/dmidecode/dmidecode.xibuild b/repo/dmidecode/dmidecode.xibuild
new file mode 100644
index 0000000..1ab5186
--- /dev/null
+++ b/repo/dmidecode/dmidecode.xibuild
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+NAME="dmidecode"
+DESC="A utility for reporting system hardware as described by BIOS"
+
+MAKEDEPS=""
+
+PKG_VER=3.3
+SOURCE="https://download.savannah.gnu.org/releases/dmidecode/dmidecode-$PKG_VER.tar.xz"
+
+prepare() {
+ apply_patches
+ sed -e '/^PROGRAMS !=/d' -e 's/-O2/-Os/' -i Makefile
+}
+
+build() {
+ make prefix=/usr
+}
+
+package() {
+ make prefix=/usr DESTDIR="$PKG_DEST" install
+}
+
diff --git a/repo/dnsmasq/0000-fix-heap-overflow-in-dns-replies.patch b/repo/dnsmasq/0000-fix-heap-overflow-in-dns-replies.patch
new file mode 100644
index 0000000..ab15361
--- /dev/null
+++ b/repo/dnsmasq/0000-fix-heap-overflow-in-dns-replies.patch
@@ -0,0 +1,66 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/dnsmasq-2.77-underflow.patch
+--
+From 77c7cabbeab1fbe1f7296f33762771f208586e59 Mon Sep 17 00:00:00 2001
+From: Doran Moppert <dmoppert@redhat.com>
+Date: Tue, 26 Sep 2017 14:48:20 +0930
+Subject: [PATCH] google patch hand-applied
+
+---
+ src/edns0.c | 10 +++++-----
+ src/forward.c | 4 ++++
+ src/rfc1035.c | 3 +++
+ 3 files changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/src/edns0.c b/src/edns0.c
+index 7bd26b8..7f96414 100644
+--- a/src/edns0.c
++++ b/src/edns0.c
+@@ -212,11 +212,11 @@ size_t add_pseudoheader(struct dns_header *header, size_t plen, unsigned char *l
+ /* Copy back any options */
+ if (buff)
+ {
+- if (p + rdlen > limit)
+- {
+- free(buff);
+- return plen; /* Too big */
+- }
++ if (p + rdlen > limit)
++ {
++ free(buff);
++ return plen; /* Too big */
++ }
+ memcpy(p, buff, rdlen);
+ free(buff);
+ p += rdlen;
+diff --git a/src/forward.c b/src/forward.c
+index 3d638e4..e254e35 100644
+--- a/src/forward.c
++++ b/src/forward.c
+@@ -1558,6 +1558,10 @@ void receive_query(struct listener *listen, time_t now)
+ udp_size = PACKETSZ; /* Sanity check - can't reduce below default. RFC 6891 6.2.3 */
+ }
+
++ // Make sure the udp size is not smaller than the incoming message so that we
++ // do not underflow
++ if (udp_size < n) udp_size = n;
++
+ #ifdef HAVE_CONNTRACK
+ #ifdef HAVE_AUTH
+ if (!auth_dns || local_auth)
+diff --git a/src/rfc1035.c b/src/rfc1035.c
+index 6fc4f26..66fa00c 100644
+--- a/src/rfc1035.c
++++ b/src/rfc1035.c
+@@ -1396,6 +1396,9 @@ size_t answer_request(struct dns_header *header, char *limit, size_t qlen,
+ size_t len;
+ int rd_bit = (header->hb3 & HB3_RD);
+
++ // Make sure we do not underflow here too.
++ if (qlen > (limit - ((char *)header))) return 0;
++
+ /* never answer queries with RD unset, to avoid cache snooping. */
+ if (ntohs(header->ancount) != 0 ||
+ ntohs(header->nscount) != 0 ||
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0001-Retry-on-interrupted-error-in-tftp.patch b/repo/dnsmasq/0001-Retry-on-interrupted-error-in-tftp.patch
new file mode 100644
index 0000000..6fa3ac3
--- /dev/null
+++ b/repo/dnsmasq/0001-Retry-on-interrupted-error-in-tftp.patch
@@ -0,0 +1,27 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0001-Retry-on-interrupted-error-in-tftp.patch (backport from upstream)
+--
+From f5f56c001dddd486859dc6301e6cbe00ba604fe8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Wed, 18 Aug 2021 10:09:35 +0200
+Subject: [PATCH 01/15] Retry on interrupted error in tftp
+
+Interrupt might arrive when sending error reply. Retry if possible.
+
+Wrong Check of Return Value
+
+diff --git a/src/tftp.c b/src/tftp.c
+index 37bdff2..3d87523 100644
+--- a/src/tftp.c
++++ b/src/tftp.c
+@@ -600,7 +600,7 @@ void check_tftp_listeners(time_t now)
+ /* Wrong source address. See rfc1350 para 4. */
+ prettyprint_addr(&peer, daemon->addrbuff);
+ len = tftp_err(ERR_TID, daemon->packet, _("ignoring packet from %s (TID mismatch)"), daemon->addrbuff);
+- sendto(transfer->sockfd, daemon->packet, len, 0, &peer.sa, sa_len(&peer));
++ while(retry_send(sendto(transfer->sockfd, daemon->packet, len, 0, &peer.sa, sa_len(&peer))));
+ }
+ }
+ }
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0002-Add-safety-checks-to-places-pointed-by-Coverity.patch b/repo/dnsmasq/0002-Add-safety-checks-to-places-pointed-by-Coverity.patch
new file mode 100644
index 0000000..1c84ecd
--- /dev/null
+++ b/repo/dnsmasq/0002-Add-safety-checks-to-places-pointed-by-Coverity.patch
@@ -0,0 +1,45 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0002-Add-safety-checks-to-places-pointed-by-Coverity.patch (backport from upstream)
+--
+From 061013293ceddce509ae06a31a045e803103f1ce Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Wed, 18 Aug 2021 14:59:23 +0200
+Subject: [PATCH 02/15] Add safety checks to places pointed by Coverity
+
+diff --git a/src/cache.c b/src/cache.c
+index 8add610..97c51a7 100644
+--- a/src/cache.c
++++ b/src/cache.c
+@@ -433,7 +433,7 @@ static struct crec *cache_scan_free(char *name, union all_addr *addr, unsigned s
+ else if (!(crecp->flags & (F_HOSTS | F_DHCP | F_CONFIG)) &&
+ (flags & crecp->flags & F_REVERSE) &&
+ (flags & crecp->flags & (F_IPV4 | F_IPV6)) &&
+- memcmp(&crecp->addr, addr, addrlen) == 0)
++ addr && memcmp(&crecp->addr, addr, addrlen) == 0)
+ {
+ *up = crecp->hash_next;
+ cache_unlink(crecp);
+@@ -2013,7 +2013,7 @@ void log_query(unsigned int flags, char *name, union all_addr *addr, char *arg)
+ else
+ source = "cached";
+
+- if (strlen(name) == 0)
++ if (name && !name[0])
+ name = ".";
+
+ if (option_bool(OPT_EXTRALOG))
+diff --git a/src/forward.c b/src/forward.c
+index 3d638e4..f07c908 100644
+--- a/src/forward.c
++++ b/src/forward.c
+@@ -2276,7 +2276,7 @@ int allocate_rfd(struct randfd_list **fdlp, struct server *serv)
+ }
+ }
+
+- if (j == daemon->numrrand)
++ if (!rfd) /* should be when j == daemon->numrrand */
+ {
+ struct randfd_list *rfl_poll;
+
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0003-Small-safeguard-to-unexpected-data.patch b/repo/dnsmasq/0003-Small-safeguard-to-unexpected-data.patch
new file mode 100644
index 0000000..8d90e96
--- /dev/null
+++ b/repo/dnsmasq/0003-Small-safeguard-to-unexpected-data.patch
@@ -0,0 +1,30 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0003-Small-safeguard-to-unexpected-data.patch
+--
+From 920cd815bafea084f68cc4309399aea77bd7f66b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 14:11:42 +0200
+Subject: [PATCH 03/15] Small safeguard to unexpected data
+
+Make sure negative index is not used for comparison. It seems code in
+option parsing does not allow it to be empty, but insist on it also in
+this place.
+---
+ src/dhcp-common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/dhcp-common.c b/src/dhcp-common.c
+index 73568a9..85b269a 100644
+--- a/src/dhcp-common.c
++++ b/src/dhcp-common.c
+@@ -88,7 +88,7 @@ int match_netid_wild(struct dhcp_netid *check, struct dhcp_netid *pool)
+ for (; check; check = check->next)
+ {
+ const int check_len = strlen(check->net);
+- const int is_wc = (check->net[check_len - 1] == '*');
++ const int is_wc = (check_len > 0 && check->net[check_len - 1] == '*');
+
+ /* '#' for not is for backwards compat. */
+ if (check->net[0] != '!' && check->net[0] != '#')
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0004-Fix-bunch-of-warnings-in-auth.c.patch b/repo/dnsmasq/0004-Fix-bunch-of-warnings-in-auth.c.patch
new file mode 100644
index 0000000..f3b7caa
--- /dev/null
+++ b/repo/dnsmasq/0004-Fix-bunch-of-warnings-in-auth.c.patch
@@ -0,0 +1,80 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0004-Fix-bunch-of-warnings-in-auth.c.patch (backport from upstream)
+--
+From e61af561900b4d2dd976a575b2efd388be092742 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 16:00:35 +0200
+Subject: [PATCH 04/15] Fix bunch of warnings in auth.c
+
+diff --git a/src/auth.c b/src/auth.c
+index 172a4b2..4f03c39 100644
+--- a/src/auth.c
++++ b/src/auth.c
+@@ -417,7 +417,6 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+
+ if (!found && is_name_synthetic(flag, name, &addr) )
+ {
+- found = 1;
+ nxdomain = 0;
+
+ log_query(F_FORWARD | F_CONFIG | flag, name, &addr, NULL);
+@@ -433,7 +432,6 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ if (qtype == T_SOA)
+ {
+ auth = soa = 1; /* inhibits auth section */
+- found = 1;
+ log_query(F_RRNAME | F_AUTH, zone->domain, NULL, "<SOA>");
+ }
+ else if (qtype == T_AXFR)
+@@ -469,7 +467,6 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ soa = 1; /* inhibits auth section */
+ ns = 1; /* ensure we include NS records! */
+ axfr = 1;
+- found = 1;
+ axfroffset = nameoffset;
+ log_query(F_RRNAME | F_AUTH, zone->domain, NULL, "<AXFR>");
+ }
+@@ -477,7 +474,6 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ {
+ auth = 1;
+ ns = 1; /* inhibits auth section */
+- found = 1;
+ log_query(F_RRNAME | F_AUTH, zone->domain, NULL, "<NS>");
+ }
+ }
+@@ -498,7 +494,6 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ *cut = '.'; /* restore domain part */
+ log_query(crecp->flags, name, &crecp->addr, record_source(crecp->uid));
+ *cut = 0; /* remove domain part */
+- found = 1;
+ if (add_resource_record(header, limit, &trunc, nameoffset, &ansp,
+ daemon->auth_ttl, NULL, qtype, C_IN,
+ qtype == T_A ? "4" : "6", &crecp->addr))
+@@ -519,7 +514,6 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ if ((crecp->flags & flag) && (local_query || filter_zone(zone, flag, &(crecp->addr))))
+ {
+ log_query(crecp->flags, name, &crecp->addr, record_source(crecp->uid));
+- found = 1;
+ if (add_resource_record(header, limit, &trunc, nameoffset, &ansp,
+ daemon->auth_ttl, NULL, qtype, C_IN,
+ qtype == T_A ? "4" : "6", &crecp->addr))
+@@ -614,7 +608,7 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ if (subnet->prefixlen >= 16 )
+ p += sprintf(p, "%u.", a & 0xff);
+ a = a >> 8;
+- p += sprintf(p, "%u.in-addr.arpa", a & 0xff);
++ sprintf(p, "%u.in-addr.arpa", a & 0xff);
+
+ }
+ else
+@@ -627,7 +621,7 @@ size_t answer_auth(struct dns_header *header, char *limit, size_t qlen, time_t n
+ int dig = ((unsigned char *)&subnet->addr.addr6)[i>>3];
+ p += sprintf(p, "%.1x.", (i>>2) & 1 ? dig & 15 : dig >> 4);
+ }
+- p += sprintf(p, "ip6.arpa");
++ sprintf(p, "ip6.arpa");
+
+ }
+ }
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0005-Fix-few-coverity-warnings-in-lease-tools.patch b/repo/dnsmasq/0005-Fix-few-coverity-warnings-in-lease-tools.patch
new file mode 100644
index 0000000..dafed7d
--- /dev/null
+++ b/repo/dnsmasq/0005-Fix-few-coverity-warnings-in-lease-tools.patch
@@ -0,0 +1,92 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0005-Fix-few-coverity-warnings-in-lease-tools.patch (backport from upstream)
+--
+From be7f213066282baeed46cc34223601c462db9cbf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 16:32:05 +0200
+Subject: [PATCH 05/15] Fix few coverity warnings in lease-tools
+
+diff --git a/contrib/lease-tools/dhcp_release.c b/contrib/lease-tools/dhcp_release.c
+index c1c835b..84f5610 100644
+--- a/contrib/lease-tools/dhcp_release.c
++++ b/contrib/lease-tools/dhcp_release.c
+@@ -280,6 +280,7 @@ int main(int argc, char **argv)
+
+ /* This voodoo fakes up a packet coming from the correct interface, which really matters for
+ a DHCP server */
++ memset(&ifr, 0, sizeof(ifr));
+ strncpy(ifr.ifr_name, argv[1], sizeof(ifr.ifr_name)-1);
+ ifr.ifr_name[sizeof(ifr.ifr_name)-1] = '\0';
+ if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, &ifr, sizeof(ifr)) == -1)
+diff --git a/contrib/lease-tools/dhcp_release6.c b/contrib/lease-tools/dhcp_release6.c
+index d680222..9b3438f 100644
+--- a/contrib/lease-tools/dhcp_release6.c
++++ b/contrib/lease-tools/dhcp_release6.c
+@@ -318,6 +318,12 @@ void usage(const char* arg, FILE* stream)
+ fprintf (stream, "Usage: %s %s\n", arg, usage_string);
+ }
+
++static void fail_fatal(const char *errstr, int exitcode)
++{
++ perror(errstr);
++ exit(exitcode);
++}
++
+ int send_release_packet(const char* iface, struct dhcp6_packet* packet)
+ {
+ struct sockaddr_in6 server_addr, client_addr;
+@@ -343,18 +349,19 @@ int send_release_packet(const char* iface, struct dhcp6_packet* packet)
+ client_addr.sin6_port = htons(DHCP6_CLIENT_PORT);
+ client_addr.sin6_flowinfo = 0;
+ client_addr.sin6_scope_id =0;
+- inet_pton(AF_INET6, "::", &client_addr.sin6_addr);
+- bind(sock, (struct sockaddr*)&client_addr, sizeof(struct sockaddr_in6));
+- inet_pton(AF_INET6, DHCP6_MULTICAST_ADDRESS, &server_addr.sin6_addr);
++ if (inet_pton(AF_INET6, "::", &client_addr.sin6_addr) <= 0)
++ fail_fatal("inet_pton", 5);
++ if (bind(sock, (struct sockaddr*)&client_addr, sizeof(struct sockaddr_in6)) != 0)
++ perror("bind"); /* continue on bind error */
++ if (inet_pton(AF_INET6, DHCP6_MULTICAST_ADDRESS, &server_addr.sin6_addr) <= 0)
++ fail_fatal("inet_pton", 5);
+ server_addr.sin6_port = htons(DHCP6_SERVER_PORT);
+- int16_t recv_size = 0;
++ ssize_t recv_size = 0;
++ int result;
+ for (i = 0; i < 5; i++)
+ {
+ if (sendto(sock, packet->buf, packet->len, 0, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0)
+- {
+- perror("sendto failed");
+- exit(4);
+- }
++ fail_fatal("sendto failed", 4);
+
+ recv_size = recvfrom(sock, response, sizeof(response), MSG_DONTWAIT, NULL, 0);
+ if (recv_size == -1)
+@@ -367,16 +374,18 @@ int send_release_packet(const char* iface, struct dhcp6_packet* packet)
+ else
+ {
+ perror("recvfrom");
++ result = UNSPEC_FAIL;
+ }
+ }
+-
+- int16_t result = parse_packet(response, recv_size);
+- if (result == NOT_REPLY_CODE)
++ else
+ {
+- sleep(1);
+- continue;
++ result = parse_packet(response, recv_size);
++ if (result == NOT_REPLY_CODE)
++ {
++ sleep(1);
++ continue;
++ }
+ }
+-
+ close(sock);
+ return result;
+ }
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0006-Fix-coverity-formats-issues-in-blockdata.patch b/repo/dnsmasq/0006-Fix-coverity-formats-issues-in-blockdata.patch
new file mode 100644
index 0000000..441fbef
--- /dev/null
+++ b/repo/dnsmasq/0006-Fix-coverity-formats-issues-in-blockdata.patch
@@ -0,0 +1,23 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0006-Fix-coverity-formats-issues-in-blockdata.patch (backport from upstream)
+--
+From 3a077065ce846e301b532127ebecdd2771ad75ed Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 16:41:00 +0200
+Subject: [PATCH 06/15] Fix coverity formats issues in blockdata
+
+diff --git a/src/blockdata.c b/src/blockdata.c
+index f7740b5..0986285 100644
+--- a/src/blockdata.c
++++ b/src/blockdata.c
+@@ -52,7 +52,7 @@ void blockdata_init(void)
+
+ void blockdata_report(void)
+ {
+- my_syslog(LOG_INFO, _("pool memory in use %u, max %u, allocated %u"),
++ my_syslog(LOG_INFO, _("pool memory in use %zu, max %zu, allocated %zu"),
+ blockdata_count * sizeof(struct blockdata),
+ blockdata_hwm * sizeof(struct blockdata),
+ blockdata_alloced * sizeof(struct blockdata));
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0007-Retry-dhcp6-ping-on-interrupts.patch b/repo/dnsmasq/0007-Retry-dhcp6-ping-on-interrupts.patch
new file mode 100644
index 0000000..7fea553
--- /dev/null
+++ b/repo/dnsmasq/0007-Retry-dhcp6-ping-on-interrupts.patch
@@ -0,0 +1,23 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0007-Retry-dhcp6-ping-on-interrupts.patch (backport from upstream)
+--
+From 467b621fb7da6e1318ac7204325b0adb01b3ff19 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 16:48:50 +0200
+Subject: [PATCH 07/15] Retry dhcp6 ping on interrupts
+
+diff --git a/src/dhcp6.c b/src/dhcp6.c
+index 2be877f..ae1f5c1 100644
+--- a/src/dhcp6.c
++++ b/src/dhcp6.c
+@@ -292,7 +292,7 @@ void get_client_mac(struct in6_addr *client, int iface, unsigned char *mac, unsi
+ if ((maclen = find_mac(&addr, mac, 0, now)) != 0)
+ break;
+
+- sendto(daemon->icmp6fd, &neigh, sizeof(neigh), 0, &addr.sa, sizeof(addr));
++ while(retry_send(sendto(daemon->icmp6fd, &neigh, sizeof(neigh), 0, &addr.sa, sizeof(addr))));
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 100000000; /* 100ms */
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0008-Fix-coverity-warnings-on-dbus.patch b/repo/dnsmasq/0008-Fix-coverity-warnings-on-dbus.patch
new file mode 100644
index 0000000..160d4d0
--- /dev/null
+++ b/repo/dnsmasq/0008-Fix-coverity-warnings-on-dbus.patch
@@ -0,0 +1,84 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0008-Fix-coverity-warnings-on-dbus.patch (backport from upstream)
+--
+From bbfdf6a435cbd5f71ae76f962ce86786346589aa Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 17:19:05 +0200
+Subject: [PATCH 08/15] Fix coverity warnings on dbus
+
+diff --git a/src/dbus.c b/src/dbus.c
+index cbdce9c..d746b9a 100644
+--- a/src/dbus.c
++++ b/src/dbus.c
+@@ -114,7 +114,7 @@ static dbus_bool_t add_watch(DBusWatch *watch, void *data)
+ w->next = daemon->watches;
+ daemon->watches = w;
+
+- w = data; /* no warning */
++ (void)data; /* no warning */
+ return TRUE;
+ }
+
+@@ -134,16 +134,20 @@ static void remove_watch(DBusWatch *watch, void *data)
+ up = &(w->next);
+ }
+
+- w = data; /* no warning */
++ (void)data; /* no warning */
+ }
+
+-static void dbus_read_servers(DBusMessage *message)
++static DBusMessage* dbus_read_servers(DBusMessage *message)
+ {
+ DBusMessageIter iter;
+ union mysockaddr addr, source_addr;
+ char *domain;
+
+- dbus_message_iter_init(message, &iter);
++ if (!dbus_message_iter_init(message, &iter))
++ {
++ return dbus_message_new_error(message, DBUS_ERROR_INVALID_ARGS,
++ "Failed to initialize dbus message iter");
++ }
+
+ mark_servers(SERV_FROM_DBUS);
+
+@@ -222,6 +226,7 @@ static void dbus_read_servers(DBusMessage *message)
+
+ /* unlink and free anything still marked. */
+ cleanup_servers();
++ return NULL;
+ }
+
+ #ifdef HAVE_LOOP
+@@ -545,6 +550,10 @@ static DBusMessage *dbus_add_lease(DBusMessage* message)
+ "Invalid IP address '%s'", ipaddr);
+
+ hw_len = parse_hex((char*)hwaddr, dhcp_chaddr, DHCP_CHADDR_MAX, NULL, &hw_type);
++ if (hw_len < 0)
++ return dbus_message_new_error_printf(message, DBUS_ERROR_INVALID_ARGS,
++ "Invalid HW address '%s'", hwaddr);
++
+ if (hw_type == 0 && hw_len != 0)
+ hw_type = ARPHRD_ETHER;
+
+@@ -668,7 +677,7 @@ DBusHandlerResult message_handler(DBusConnection *connection,
+ #endif
+ else if (strcmp(method, "SetServers") == 0)
+ {
+- dbus_read_servers(message);
++ reply = dbus_read_servers(message);
+ new_servers = 1;
+ }
+ else if (strcmp(method, "SetServersEx") == 0)
+@@ -719,7 +728,7 @@ DBusHandlerResult message_handler(DBusConnection *connection,
+ if (clear_cache)
+ clear_cache_and_reload(dnsmasq_time());
+
+- method = user_data; /* no warning */
++ (void)user_data; /* no warning */
+
+ /* If no reply or no error, return nothing */
+ if (!reply)
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0009-Address-coverity-issues-detected-in-util.c.patch b/repo/dnsmasq/0009-Address-coverity-issues-detected-in-util.c.patch
new file mode 100644
index 0000000..4dbf56d
--- /dev/null
+++ b/repo/dnsmasq/0009-Address-coverity-issues-detected-in-util.c.patch
@@ -0,0 +1,58 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0009-Address-coverity-issues-detected-in-util.c.patch (backport from upstream)
+--
+From 7b975696a7bda5b86fcf168644f177544adb6fe9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 17:38:26 +0200
+Subject: [PATCH 09/15] Address coverity issues detected in util.c
+
+diff --git a/src/util.c b/src/util.c
+index 1425764..8e69d55 100644
+--- a/src/util.c
++++ b/src/util.c
+@@ -208,6 +208,8 @@ char *canonicalise(char *in, int *nomem)
+ /* older libidn2 strips underscores, so don't do IDN processing
+ if the name has an underscore (check_name() returned 2) */
+ if (rc != 2)
++#else
++ (void)rc;
+ #endif
+ #if defined(HAVE_IDN) || defined(HAVE_LIBIDN2)
+ {
+@@ -235,11 +237,14 @@ char *canonicalise(char *in, int *nomem)
+ return ret;
+ }
+ #endif
+-
++
++#if !defined(HAVE_LIBIDN2) || (defined(HAVE_LIBIDN2) && (!defined(IDN2_VERSION_NUMBER) || IDN2_VERSION_NUMBER < 0x02000003))
++ /* If recent libidn2 is used, it cannot reach this code. */
+ if ((ret = whine_malloc(strlen(in)+1)))
+ strcpy(ret, in);
+ else if (nomem)
+- *nomem = 1;
++ *nomem = 1;
++#endif
+
+ return ret;
+ }
+@@ -528,7 +533,7 @@ void prettyprint_time(char *buf, unsigned int t)
+ if ((x = (t/60)%60))
+ p += sprintf(&buf[p], "%um", x);
+ if ((x = t%60))
+- p += sprintf(&buf[p], "%us", x);
++ sprintf(&buf[p], "%us", x);
+ }
+ }
+
+@@ -574,7 +579,7 @@ int parse_hex(char *in, unsigned char *out, int maxlen,
+ int j, bytes = (1 + (r - in))/2;
+ for (j = 0; j < bytes; j++)
+ {
+- char sav = sav;
++ char sav;
+ if (j < bytes - 1)
+ {
+ sav = in[(j+1)*2];
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0010-Fix-coverity-detected-issues-in-option.c.patch b/repo/dnsmasq/0010-Fix-coverity-detected-issues-in-option.c.patch
new file mode 100644
index 0000000..25b271f
--- /dev/null
+++ b/repo/dnsmasq/0010-Fix-coverity-detected-issues-in-option.c.patch
@@ -0,0 +1,135 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0010-Fix-coverity-detected-issues-in-option.c.patch (backport from upstream)
+--
+From db835f8c40e83c6392e69ffc7f2cc500f7682dd4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 19:23:20 +0200
+Subject: [PATCH 10/15] Fix coverity detected issues in option.c
+
+diff --git a/src/option.c b/src/option.c
+index ffce9fc..11655fd 100644
+--- a/src/option.c
++++ b/src/option.c
+@@ -798,7 +798,7 @@ static void do_usage(void)
+
+ if (usage[i].arg)
+ {
+- strcpy(buff, usage[i].arg);
++ safe_strncpy(buff, usage[i].arg, sizeof(buff));
+ for (j = 0; tab[j].handle; j++)
+ if (tab[j].handle == *(usage[i].arg))
+ sprintf(buff, "%d", tab[j].val);
+@@ -959,7 +959,7 @@ static int domain_rev4(char *domain, struct in_addr addr, int msize)
+ return 0;
+ }
+
+- domain += sprintf(domain, "in-addr.arpa");
++ sprintf(domain, "in-addr.arpa");
+
+ return 1;
+ }
+@@ -978,7 +978,7 @@ static int domain_rev6(char *domain, struct in6_addr *addr, int msize)
+ int dig = ((unsigned char *)addr)[i>>3];
+ domain += sprintf(domain, "%.1x.", (i>>2) & 1 ? dig & 15 : dig >> 4);
+ }
+- domain += sprintf(domain, "ip6.arpa");
++ sprintf(domain, "ip6.arpa");
+
+ return 1;
+ }
+@@ -1829,6 +1829,8 @@ static int one_opt(int option, char *arg, char *errstr, char *gen_err, int comma
+ new->next = li;
+ *up = new;
+ }
++ else
++ free(path);
+
+ }
+
+@@ -1995,7 +1997,11 @@ static int one_opt(int option, char *arg, char *errstr, char *gen_err, int comma
+
+ if (!(name = canonicalise_opt(arg)) ||
+ (comma && !(target = canonicalise_opt(comma))))
+- ret_err(_("bad MX name"));
++ {
++ free(name);
++ free(target);
++ ret_err(_("bad MX name"));
++ }
+
+ new = opt_malloc(sizeof(struct mx_srv_record));
+ new->next = daemon->mxnames;
+@@ -3616,6 +3622,7 @@ static int one_opt(int option, char *arg, char *errstr, char *gen_err, int comma
+ inet_ntop(AF_INET, &in, daemon->addrbuff, ADDRSTRLEN);
+ sprintf(errstr, _("duplicate dhcp-host IP address %s"),
+ daemon->addrbuff);
++ dhcp_config_free(new);
+ return 0;
+ }
+ }
+@@ -3779,16 +3786,16 @@ static int one_opt(int option, char *arg, char *errstr, char *gen_err, int comma
+
+ case LOPT_NAME_MATCH: /* --dhcp-name-match */
+ {
+- struct dhcp_match_name *new = opt_malloc(sizeof(struct dhcp_match_name));
+- struct dhcp_netid *id = opt_malloc(sizeof(struct dhcp_netid));
++ struct dhcp_match_name *new;
+ ssize_t len;
+
+ if (!(comma = split(arg)) || (len = strlen(comma)) == 0)
+ ret_err(gen_err);
+
++ new = opt_malloc(sizeof(struct dhcp_match_name));
+ new->wildcard = 0;
+- new->netid = id;
+- id->net = opt_string_alloc(set_prefix(arg));
++ new->netid = opt_malloc(sizeof(struct dhcp_netid));
++ new->netid->net = opt_string_alloc(set_prefix(arg));
+
+ if (comma[len-1] == '*')
+ {
+@@ -3992,6 +3999,8 @@ static int one_opt(int option, char *arg, char *errstr, char *gen_err, int comma
+ }
+ }
+
++ dhcp_netid_free(new->netid);
++ free(new);
+ ret_err(gen_err);
+ }
+
+@@ -4367,7 +4376,7 @@ err:
+ case LOPT_CNAME: /* --cname */
+ {
+ struct cname *new;
+- char *alias, *target, *last, *pen;
++ char *alias, *target=NULL, *last, *pen;
+ int ttl = -1;
+
+ for (last = pen = NULL, comma = arg; comma; comma = split(comma))
+@@ -4382,13 +4391,13 @@ err:
+ if (pen != arg && atoi_check(last, &ttl))
+ last = pen;
+
+- target = canonicalise_opt(last);
+-
+ while (arg != last)
+ {
+ int arglen = strlen(arg);
+ alias = canonicalise_opt(arg);
+
++ if (!target)
++ target = canonicalise_opt(last);
+ if (!alias || !target)
+ {
+ free(target);
+@@ -4691,7 +4700,7 @@ err:
+ struct name_list *nl;
+ if (!canon)
+ {
+- struct name_list *tmp = new->names, *next;
++ struct name_list *tmp, *next;
+ for (tmp = new->names; tmp; tmp = next)
+ {
+ next = tmp->next;
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0011-Fix-coverity-detected-issue-in-radv.c.patch b/repo/dnsmasq/0011-Fix-coverity-detected-issue-in-radv.c.patch
new file mode 100644
index 0000000..d3a9819
--- /dev/null
+++ b/repo/dnsmasq/0011-Fix-coverity-detected-issue-in-radv.c.patch
@@ -0,0 +1,23 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0011-Fix-coverity-detected-issue-in-radv.c.patch (backport from upstream)
+--
+From 9c088b29dcdb8a3e013120d8272a6e0314a8f3df Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 19:29:23 +0200
+Subject: [PATCH 11/15] Fix coverity detected issue in radv.c
+
+diff --git a/src/radv.c b/src/radv.c
+index 3255904..6d6fa32 100644
+--- a/src/radv.c
++++ b/src/radv.c
+@@ -746,6 +746,8 @@ static int add_lla(int index, unsigned int type, char *mac, size_t maclen, void
+ add 7 to round up */
+ int len = (maclen + 9) >> 3;
+ unsigned char *p = expand(len << 3);
++ if (!p)
++ return 1;
+ memset(p, 0, len << 3);
+ *p++ = ICMP6_OPT_SOURCE_MAC;
+ *p++ = len;
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0012-Fix-coverity-detected-issues-in-cache.c.patch b/repo/dnsmasq/0012-Fix-coverity-detected-issues-in-cache.c.patch
new file mode 100644
index 0000000..b98f71f
--- /dev/null
+++ b/repo/dnsmasq/0012-Fix-coverity-detected-issues-in-cache.c.patch
@@ -0,0 +1,23 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0012-Fix-coverity-detected-issues-in-cache.c.patch (backport from upstream)
+--
+From 957b2b25238d82a6c3afced2ff0423ad171fb22e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 20:10:37 +0200
+Subject: [PATCH 12/15] Fix coverity detected issues in cache.c
+
+diff --git a/src/cache.c b/src/cache.c
+index 97c51a7..6722fa6 100644
+--- a/src/cache.c
++++ b/src/cache.c
+@@ -1188,7 +1188,7 @@ void cache_reload(void)
+ struct host_record *hr;
+ struct name_list *nl;
+ struct cname *a;
+- struct crec lrec;
++ struct crec lrec = { 0, };
+ struct mx_srv_record *mx;
+ struct txt_record *txt;
+ struct interface_name *intr;
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0013-Fix-coverity-issues-detected-in-domain-match.c.patch b/repo/dnsmasq/0013-Fix-coverity-issues-detected-in-domain-match.c.patch
new file mode 100644
index 0000000..7b8db66
--- /dev/null
+++ b/repo/dnsmasq/0013-Fix-coverity-issues-detected-in-domain-match.c.patch
@@ -0,0 +1,60 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0013-Fix-coverity-issues-detected-in-domain-match.c.patch (backport from upstream)
+--
+From 0dafe990a1395d597bc6022c3936769f7a0ddea7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 21:16:22 +0200
+Subject: [PATCH 13/15] Fix coverity issues detected in domain-match.c
+
+diff --git a/src/domain-match.c b/src/domain-match.c
+index f8e4796..7124c18 100644
+--- a/src/domain-match.c
++++ b/src/domain-match.c
+@@ -411,7 +411,8 @@ size_t make_local_answer(int flags, int gotname, size_t size, struct dns_header
+ addr.addr4 = srv->addr;
+
+ header->ancount = htons(ntohs(header->ancount) + 1);
+- add_resource_record(header, limit, &trunc, sizeof(struct dns_header), &p, daemon->local_ttl, NULL, T_A, C_IN, "4", &addr);
++ if (!add_resource_record(header, limit, &trunc, sizeof(struct dns_header), &p, daemon->local_ttl, NULL, T_A, C_IN, "4", &addr))
++ return 0;
+ log_query((flags | F_CONFIG | F_FORWARD) & ~F_IPV6, name, (union all_addr *)&addr, NULL);
+ }
+
+@@ -426,7 +427,8 @@ size_t make_local_answer(int flags, int gotname, size_t size, struct dns_header
+ addr.addr6 = srv->addr;
+
+ header->ancount = htons(ntohs(header->ancount) + 1);
+- add_resource_record(header, limit, &trunc, sizeof(struct dns_header), &p, daemon->local_ttl, NULL, T_AAAA, C_IN, "6", &addr);
++ if (!add_resource_record(header, limit, &trunc, sizeof(struct dns_header), &p, daemon->local_ttl, NULL, T_AAAA, C_IN, "6", &addr))
++ return 0;
+ log_query((flags | F_CONFIG | F_FORWARD) & ~F_IPV4, name, (union all_addr *)&addr, NULL);
+ }
+
+@@ -609,9 +611,11 @@ int add_update_server(int flags,
+
+ if (*domain == 0)
+ alloc_domain = whine_malloc(1);
+- else if (!(alloc_domain = canonicalise((char *)domain, NULL)))
++ else
++ alloc_domain = canonicalise((char *)domain, NULL);
++ if (!alloc_domain)
+ return 0;
+-
++
+ /* See if there is a suitable candidate, and unmark
+ only do this for forwarding servers, not
+ address or local, to avoid delays on large numbers. */
+@@ -643,7 +647,10 @@ int add_update_server(int flags,
+ size = sizeof(struct server);
+
+ if (!(serv = whine_malloc(size)))
+- return 0;
++ {
++ free(alloc_domain);
++ return 0;
++ }
+
+ if (flags & SERV_IS_LOCAL)
+ {
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0014-Fix-coverity-detected-issues-in-dnsmasq.c.patch b/repo/dnsmasq/0014-Fix-coverity-detected-issues-in-dnsmasq.c.patch
new file mode 100644
index 0000000..148a4b3
--- /dev/null
+++ b/repo/dnsmasq/0014-Fix-coverity-detected-issues-in-dnsmasq.c.patch
@@ -0,0 +1,69 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0014-Fix-coverity-detected-issues-in-dnsmasq.c.patch (backport from upstream)
+--
+From f476acbe3c2830e6ff0c50cc36d364a3f3f4fadb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 22:45:29 +0200
+Subject: [PATCH 14/15] Fix coverity detected issues in dnsmasq.c
+
+diff --git a/src/dnsmasq.c b/src/dnsmasq.c
+index 602daed..3e1bfe8 100644
+--- a/src/dnsmasq.c
++++ b/src/dnsmasq.c
+@@ -34,7 +34,6 @@ static void poll_resolv(int force, int do_reload, time_t now);
+
+ int main (int argc, char **argv)
+ {
+- int bind_fallback = 0;
+ time_t now;
+ struct sigaction sigact;
+ struct iname *if_tmp;
+@@ -59,6 +58,8 @@ int main (int argc, char **argv)
+ int did_bind = 0;
+ struct server *serv;
+ char *netlink_warn;
++#else
++ int bind_fallback = 0;
+ #endif
+ #if defined(HAVE_DHCP) || defined(HAVE_DHCP6)
+ struct dhcp_context *context;
+@@ -377,7 +378,7 @@ int main (int argc, char **argv)
+ bindtodevice(bound_device, daemon->dhcpfd);
+ did_bind = 1;
+ }
+- if (daemon->enable_pxe && bound_device)
++ if (daemon->enable_pxe && bound_device && daemon->pxefd != -1)
+ {
+ bindtodevice(bound_device, daemon->pxefd);
+ did_bind = 1;
+@@ -920,8 +921,10 @@ int main (int argc, char **argv)
+ my_syslog(LOG_WARNING, _("warning: failed to change owner of %s: %s"),
+ daemon->log_file, strerror(log_err));
+
++#ifndef HAVE_LINUX_NETWORK
+ if (bind_fallback)
+ my_syslog(LOG_WARNING, _("setting --bind-interfaces option because of OS limitations"));
++#endif
+
+ if (option_bool(OPT_NOWILD))
+ warn_bound_listeners();
+@@ -1575,7 +1578,7 @@ static void async_event(int pipe, time_t now)
+ {
+ /* block in writes until all done */
+ if ((i = fcntl(daemon->helperfd, F_GETFL)) != -1)
+- fcntl(daemon->helperfd, F_SETFL, i & ~O_NONBLOCK);
++ while(retry_send(fcntl(daemon->helperfd, F_SETFL, i & ~O_NONBLOCK)));
+ do {
+ helper_write();
+ } while (!helper_buf_empty() || do_script_run(now));
+@@ -1984,7 +1987,7 @@ static void check_dns_listeners(time_t now)
+ attribute from the listening socket.
+ Reset that here. */
+ if ((flags = fcntl(confd, F_GETFL, 0)) != -1)
+- fcntl(confd, F_SETFL, flags & ~O_NONBLOCK);
++ while(retry_send(fcntl(confd, F_SETFL, flags & ~O_NONBLOCK)));
+
+ buff = tcp_request(confd, now, &tcp_addr, netmask, auth_dns);
+
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0015-Fix-coverity-issues-in-dnssec.c.patch b/repo/dnsmasq/0015-Fix-coverity-issues-in-dnssec.c.patch
new file mode 100644
index 0000000..7f9d5d3
--- /dev/null
+++ b/repo/dnsmasq/0015-Fix-coverity-issues-in-dnssec.c.patch
@@ -0,0 +1,35 @@
+Patch-Source: https://src.fedoraproject.org/rpms/dnsmasq/blob/f36/f/0015-Fix-coverity-issues-in-dnssec.c.patch (backport from upstream)
+--
+From 82c23fb1f0d9e46c6ce4bc4a57f0d377cc6089b7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Fri, 3 Sep 2021 22:51:36 +0200
+Subject: [PATCH 15/15] Fix coverity issues in dnssec.c
+
+diff --git a/src/dnssec.c b/src/dnssec.c
+index 94ebb6f..8800a5b 100644
+--- a/src/dnssec.c
++++ b/src/dnssec.c
+@@ -724,7 +724,8 @@ static int validate_rrset(time_t now, struct dns_header *header, size_t plen, in
+
+ /* namebuff used for workspace above, restore to leave unchanged on exit */
+ p = (unsigned char*)(rrset[0]);
+- extract_name(header, plen, &p, name, 1, 0);
++ if (!extract_name(header, plen, &p, name, 1, 0))
++ return STAT_BOGUS;
+
+ if (key)
+ {
+@@ -1017,7 +1018,9 @@ int dnssec_validate_ds(time_t now, struct dns_header *header, size_t plen, char
+ }
+
+ p = (unsigned char *)(header+1);
+- extract_name(header, plen, &p, name, 1, 4);
++ if (!extract_name(header, plen, &p, name, 1, 4))
++ return STAT_BOGUS;
++
+ p += 4; /* qtype, qclass */
+
+ /* If the key needed to validate the DS is on the same domain as the DS, we'll
+--
+2.31.1
+
diff --git a/repo/dnsmasq/0020-Fix-crash-after-re-reading-empty-resolv.conf.patch b/repo/dnsmasq/0020-Fix-crash-after-re-reading-empty-resolv.conf.patch
new file mode 100644
index 0000000..169897e
--- /dev/null
+++ b/repo/dnsmasq/0020-Fix-crash-after-re-reading-empty-resolv.conf.patch
@@ -0,0 +1,38 @@
+Patch-Source: https://thekelleys.org.uk/gitweb/?p=dnsmasq.git;a=commit;h=d290630d31f4517ab26392d00753d1397f9a4114 (upstream)
+--
+From d290630d31f4517ab26392d00753d1397f9a4114 Mon Sep 17 00:00:00 2001
+From: Simon Kelley <simon@thekelleys.org.uk>
+Date: Wed, 6 Oct 2021 22:31:06 +0100
+Subject: [PATCH] Fix crash after re-reading an empty resolv.conf file.
+
+If dnsmasq re-reads a resolv file, and it's empty, it will
+retry after a delay. In the meantime, the old servers from the
+resolv file have been deleted, but the servers_array doesn't
+get updated, leading to dangling pointers and crashes.
+
+Thanks to Brad Jorsch for finding and analysing this bug.
+
+This problem was introduced in 2.86.
+---
+ src/dnsmasq.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/src/dnsmasq.c b/src/dnsmasq.c
+index c7fa024..9516680 100644
+--- a/src/dnsmasq.c
++++ b/src/dnsmasq.c
+@@ -1682,6 +1682,11 @@ static void poll_resolv(int force, int do_reload, time_t now)
+ }
+ else
+ {
++ /* If we're delaying things, we don't call check_servers(), but
++ reload_servers() may have deleted some servers, rendering the server_array
++ invalid, so just rebuild that here. Once reload_servers() succeeds,
++ we call check_servers() above, which calls build_server_array itself. */
++ build_server_array();
+ latest->mtime = 0;
+ if (!warned)
+ {
+--
+2.20.1
+
diff --git a/repo/dnsmasq/CVE-2022-0934.patch b/repo/dnsmasq/CVE-2022-0934.patch
new file mode 100644
index 0000000..1381626
--- /dev/null
+++ b/repo/dnsmasq/CVE-2022-0934.patch
@@ -0,0 +1,189 @@
+Patch-Source: https://thekelleys.org.uk/gitweb/?p=dnsmasq.git;a=commit;h=03345ecefeb0d82e3c3a4c28f27c3554f0611b39 (upstream)
+--
+From 03345ecefeb0d82e3c3a4c28f27c3554f0611b39 Mon Sep 17 00:00:00 2001
+From: Simon Kelley <simon@thekelleys.org.uk>
+Date: Thu, 31 Mar 2022 21:35:20 +0100
+Subject: [PATCH] Fix write-after-free error in DHCPv6 code. CVE-2022-0934
+ refers.
+
+---
+ CHANGELOG | 3 +++
+ src/rfc3315.c | 48 +++++++++++++++++++++++++++---------------------
+ 2 files changed, 30 insertions(+), 21 deletions(-)
+
+diff --git a/CHANGELOG b/CHANGELOG
+index 87d6c2b..4bc7fb1 100644
+--- a/CHANGELOG
++++ b/CHANGELOG
+@@ -55,6 +55,9 @@ version 2.87
+ doesn't require hard-coding addresses. Thanks to Sten Spans for
+ the idea.
+
++ Fix write-after-free error in DHCPv6 server code.
++ CVE-2022-0934 refers.
++
+
+ version 2.86
+ Handle DHCPREBIND requests in the DHCPv6 server code.
+diff --git a/src/rfc3315.c b/src/rfc3315.c
+index cee8382..e218d26 100644
+--- a/src/rfc3315.c
++++ b/src/rfc3315.c
+@@ -33,9 +33,9 @@ struct state {
+ unsigned int mac_len, mac_type;
+ };
+
+-static int dhcp6_maybe_relay(struct state *state, void *inbuff, size_t sz,
++static int dhcp6_maybe_relay(struct state *state, unsigned char *inbuff, size_t sz,
+ struct in6_addr *client_addr, int is_unicast, time_t now);
+-static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_t sz, int is_unicast, time_t now);
++static int dhcp6_no_relay(struct state *state, int msg_type, unsigned char *inbuff, size_t sz, int is_unicast, time_t now);
+ static void log6_opts(int nest, unsigned int xid, void *start_opts, void *end_opts);
+ static void log6_packet(struct state *state, char *type, struct in6_addr *addr, char *string);
+ static void log6_quiet(struct state *state, char *type, struct in6_addr *addr, char *string);
+@@ -104,12 +104,12 @@ unsigned short dhcp6_reply(struct dhcp_context *context, int interface, char *if
+ }
+
+ /* This cost me blood to write, it will probably cost you blood to understand - srk. */
+-static int dhcp6_maybe_relay(struct state *state, void *inbuff, size_t sz,
++static int dhcp6_maybe_relay(struct state *state, unsigned char *inbuff, size_t sz,
+ struct in6_addr *client_addr, int is_unicast, time_t now)
+ {
+ void *end = inbuff + sz;
+ void *opts = inbuff + 34;
+- int msg_type = *((unsigned char *)inbuff);
++ int msg_type = *inbuff;
+ unsigned char *outmsgtypep;
+ void *opt;
+ struct dhcp_vendor *vendor;
+@@ -259,15 +259,15 @@ static int dhcp6_maybe_relay(struct state *state, void *inbuff, size_t sz,
+ return 1;
+ }
+
+-static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_t sz, int is_unicast, time_t now)
++static int dhcp6_no_relay(struct state *state, int msg_type, unsigned char *inbuff, size_t sz, int is_unicast, time_t now)
+ {
+ void *opt;
+- int i, o, o1, start_opts;
++ int i, o, o1, start_opts, start_msg;
+ struct dhcp_opt *opt_cfg;
+ struct dhcp_netid *tagif;
+ struct dhcp_config *config = NULL;
+ struct dhcp_netid known_id, iface_id, v6_id;
+- unsigned char *outmsgtypep;
++ unsigned char outmsgtype;
+ struct dhcp_vendor *vendor;
+ struct dhcp_context *context_tmp;
+ struct dhcp_mac *mac_opt;
+@@ -296,12 +296,13 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ v6_id.next = state->tags;
+ state->tags = &v6_id;
+
+- /* copy over transaction-id, and save pointer to message type */
+- if (!(outmsgtypep = put_opt6(inbuff, 4)))
++ start_msg = save_counter(-1);
++ /* copy over transaction-id */
++ if (!put_opt6(inbuff, 4))
+ return 0;
+ start_opts = save_counter(-1);
+- state->xid = outmsgtypep[3] | outmsgtypep[2] << 8 | outmsgtypep[1] << 16;
+-
++ state->xid = inbuff[3] | inbuff[2] << 8 | inbuff[1] << 16;
++
+ /* We're going to be linking tags from all context we use.
+ mark them as unused so we don't link one twice and break the list */
+ for (context_tmp = state->context; context_tmp; context_tmp = context_tmp->current)
+@@ -347,7 +348,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ (msg_type == DHCP6REQUEST || msg_type == DHCP6RENEW || msg_type == DHCP6RELEASE || msg_type == DHCP6DECLINE))
+
+ {
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+ o1 = new_opt6(OPTION6_STATUS_CODE);
+ put_opt6_short(DHCP6USEMULTI);
+ put_opt6_string("Use multicast");
+@@ -619,11 +620,11 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ struct dhcp_netid *solicit_tags;
+ struct dhcp_context *c;
+
+- *outmsgtypep = DHCP6ADVERTISE;
++ outmsgtype = DHCP6ADVERTISE;
+
+ if (opt6_find(state->packet_options, state->end, OPTION6_RAPID_COMMIT, 0))
+ {
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+ state->lease_allocate = 1;
+ o = new_opt6(OPTION6_RAPID_COMMIT);
+ end_opt6(o);
+@@ -809,7 +810,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ int start = save_counter(-1);
+
+ /* set reply message type */
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+ state->lease_allocate = 1;
+
+ log6_quiet(state, "DHCPREQUEST", NULL, ignore ? _("ignored") : NULL);
+@@ -924,7 +925,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ int address_assigned = 0;
+
+ /* set reply message type */
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+
+ log6_quiet(state, msg_type == DHCP6RENEW ? "DHCPRENEW" : "DHCPREBIND", NULL, NULL);
+
+@@ -1057,7 +1058,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ int good_addr = 0;
+
+ /* set reply message type */
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+
+ log6_quiet(state, "DHCPCONFIRM", NULL, NULL);
+
+@@ -1121,7 +1122,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ log6_quiet(state, "DHCPINFORMATION-REQUEST", NULL, ignore ? _("ignored") : state->hostname);
+ if (ignore)
+ return 0;
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+ tagif = add_options(state, 1);
+ break;
+ }
+@@ -1130,7 +1131,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ case DHCP6RELEASE:
+ {
+ /* set reply message type */
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+
+ log6_quiet(state, "DHCPRELEASE", NULL, NULL);
+
+@@ -1195,7 +1196,7 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ case DHCP6DECLINE:
+ {
+ /* set reply message type */
+- *outmsgtypep = DHCP6REPLY;
++ outmsgtype = DHCP6REPLY;
+
+ log6_quiet(state, "DHCPDECLINE", NULL, NULL);
+
+@@ -1275,7 +1276,12 @@ static int dhcp6_no_relay(struct state *state, int msg_type, void *inbuff, size_
+ }
+
+ }
+-
++
++ /* Fill in the message type. Note that we store the offset,
++ not a direct pointer, since the packet memory may have been
++ reallocated. */
++ ((unsigned char *)(daemon->outpacket.iov_base))[start_msg] = outmsgtype;
++
+ log_tags(tagif, state->xid);
+ log6_opts(0, state->xid, daemon->outpacket.iov_base + start_opts, daemon->outpacket.iov_base + save_counter(-1));
+
+--
+2.20.1
+
diff --git a/repo/dnsmasq/config.h.patch b/repo/dnsmasq/config.h.patch
new file mode 100644
index 0000000..7847696
--- /dev/null
+++ b/repo/dnsmasq/config.h.patch
@@ -0,0 +1,12 @@
+Adjust the default run-as user/group and the pidfile location.
+
+--- a/src/config.h
++++ b/src/config.h
+@@ -47,2 +47,2 @@
+-#define CHUSER "nobody"
+-#define CHGRP "dip"
++#define CHUSER "dnsmasq"
++#define CHGRP "dnsmasq"
+@@ -231 +231 @@
+-# define RUNFILE "/var/run/dnsmasq.pid"
++# define RUNFILE "/run/dnsmasq.pid"
diff --git a/repo/dnsmasq/dnsmasq-dnssec.pre-install b/repo/dnsmasq/dnsmasq-dnssec.pre-install
new file mode 100644
index 0000000..708c15b
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq-dnssec.pre-install
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+addgroup -S dnsmasq 2>/dev/null
+adduser -S -D -H -h /dev/null -s /sbin/nologin -G dnsmasq -g dnsmasq dnsmasq 2>/dev/null
+
+exit 0
diff --git a/repo/dnsmasq/dnsmasq-dnssec.pre-upgrade b/repo/dnsmasq/dnsmasq-dnssec.pre-upgrade
new file mode 100644
index 0000000..708c15b
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq-dnssec.pre-upgrade
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+addgroup -S dnsmasq 2>/dev/null
+adduser -S -D -H -h /dev/null -s /sbin/nologin -G dnsmasq -g dnsmasq dnsmasq 2>/dev/null
+
+exit 0
diff --git a/repo/dnsmasq/dnsmasq.conf.patch b/repo/dnsmasq/dnsmasq.conf.patch
new file mode 100644
index 0000000..e3d7df4
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq.conf.patch
@@ -0,0 +1,38 @@
+--- a/dnsmasq.conf.example
++++ b/dnsmasq.conf.example
+@@ -21,8 +21,8 @@
+ #bogus-priv
+
+ # Uncomment these to enable DNSSEC validation and caching:
+-# (Requires dnsmasq to be built with DNSSEC option.)
+-#conf-file=%%PREFIX%%/share/dnsmasq/trust-anchors.conf
++# (Requires dnsmasq-dnssec package to be installed)
++#conf-file=/usr/share/dnsmasq/trust-anchors.conf
+ #dnssec
+
+ # Replies which are not DNSSEC signed may be legitimate, because the domain
+@@ -96,9 +96,13 @@
+
+ # If you want dnsmasq to change uid and gid to something other
+ # than the default, edit the following lines.
+-#user=
+-#group=
++#user=dnsmasq
++#group=dnsmasq
+
++# Serve DNS and DHCP only to networks directly connected to this machine.
++# Any interface= line will override it.
++local-service
++
+ # If you want dnsmasq to listen for DHCP and DNS requests only on
+ # specified interfaces (and the loopback) give the name of the
+ # interface (eg eth0) here.
+@@ -671,7 +675,7 @@
+ #conf-dir=/etc/dnsmasq.d,.bak
+
+ # Include all files in a directory which end in .conf
+-#conf-dir=/etc/dnsmasq.d/,*.conf
++conf-dir=/etc/dnsmasq.d/,*.conf
+
+ # If a DHCP client claims that its name is "wpad", ignore that.
+ # This fixes a security hole. see CERT Vulnerability VU#598349
diff --git a/repo/dnsmasq/dnsmasq.confd b/repo/dnsmasq/dnsmasq.confd
new file mode 100644
index 0000000..564a25d
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq.confd
@@ -0,0 +1,22 @@
+# Configuration for /etc/init.d/dnsmasq
+
+# Path to the dnsmasq configuration file.
+#cfgfile="/etc/dnsmasq.conf"
+
+# Location where to store DHCP leases (sets --dhcp-leasefile).
+#leasefile="/var/lib/misc/$RC_SVCNAME.leases"
+
+# Whether to automatically set up a network bridge when the init script is
+# a symlink with a suffix (e.g. /etc/init.d/dnsmasq.br0).
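+# For example, `ln -s dnsmasq /etc/init.d/dnsmasq.br0` makes this service manage a bridge named br0.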
+#setup_bridge=yes
+
+# User and group to change to after startup.
+#user="dnsmasq"
+#group="dnsmasq"
+
+# Additional options to pass to dnsmasq.
+# See the dnsmasq(8) man page for more information.
+#command_args=
+
+# Uncomment to run with process supervisor.
+# supervisor=supervise-daemon
diff --git a/repo/dnsmasq/dnsmasq.initd b/repo/dnsmasq/dnsmasq.initd
new file mode 100644
index 0000000..be09548
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq.initd
@@ -0,0 +1,151 @@
+#!/sbin/openrc-run
+
+description="A lightweight DNS, DHCP, RA, TFTP and PXE server"
+
+extra_commands="checkconfig"
+description_checkconfig="Check configuration syntax"
+
+extra_started_commands="reload"
+description_reload="Clear cache and reload hosts files"
+
+# DNSMASQ_CONFFILE is here for backward compatibility (Alpine <3.16).
+: ${cfgfile:=${DNSMASQ_CONFFILE:-"/etc/dnsmasq.conf"}}
+: ${leasefile:="/var/lib/misc/$RC_SVCNAME.leases"}
+: ${user:="dnsmasq"}
+: ${group:="dnsmasq"}
+: ${setup_bridge:="yes"}
+
+command="/usr/sbin/dnsmasq"
+# Tell dnsmasq not to create a pidfile; that's the responsibility of the init system.
+# DNSMASQ_OPTS is here for backward compatibility (Alpine <3.16).
+command_args="--keep-in-foreground --pid-file= $DNSMASQ_OPTS $command_args --conf-file=$cfgfile"
+command_background="yes"
+pidfile="/run/$RC_SVCNAME.pid"
+
+if [ "${RC_SVCNAME#*.}" != "$RC_SVCNAME" ] && yesno "$setup_bridge"; then
+ BRIDGE="${RC_SVCNAME#*.}"
+ : ${BRIDGE_ADDR:="10.0.3.1"}
+ : ${BRIDGE_NETMASK:="255.255.255.0"}
+ : ${BRIDGE_NETWORK:="10.0.3.0/24"}
+ : ${BRIDGE_DHCP_RANGE:="10.0.3.2,10.0.3.254"}
+ : ${BRIDGE_DHCP_MAX:="253"}
+	: ${BRIDGE_MAC:="00:16:3e:00:00:00"}
+ : ${DNSMASQ_LISTEN_BRIDGE_ADDR:=yes}
+fi
+
+depend() {
+ provide dns
+ need localmount net
+ after bootmisc
+ use logger
+}
+
+setup_firewall() {
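+	# $1: action for the filter-table rules (-I on start, -D on stop)
+	# $2: action for the nat/mangle rules (-A on start, -D on stop)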
+ local ins=$1 add=$2
+
+ iptables -w $ins INPUT -i "$BRIDGE" -p udp --dport 67 -j ACCEPT
+ iptables -w $ins INPUT -i "$BRIDGE" -p tcp --dport 67 -j ACCEPT
+ iptables -w $ins INPUT -i "$BRIDGE" -p udp --dport 53 -j ACCEPT
+ iptables -w $ins INPUT -i "$BRIDGE" -p tcp --dport 53 -j ACCEPT
+ iptables -w $ins FORWARD -i "$BRIDGE" -j ACCEPT
+ iptables -w $ins FORWARD -o "$BRIDGE" -j ACCEPT
+ iptables -w -t nat $add POSTROUTING -s "$BRIDGE_NETWORK" ! -d "$BRIDGE_NETWORK" -j MASQUERADE
+ iptables -w -t mangle $add POSTROUTING -o "$BRIDGE" -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
+
+ if yesno "$BRIDGE_IPV6_NAT" && [ -n "$BRIDGE_IPV6_NETWORK" ]; then
+ ip6tables -w -t nat $add POSTROUTING -s "$BRIDGE_IPV6_NETWORK" ! -d "$BRIDGE_IPV6_NETWORK" -j MASQUERADE
+ fi
+}
+
+setup_bridge() {
+ einfo "Creating bridge $BRIDGE"
+
+ if ! [ -d "/sys/class/net/$BRIDGE" ]; then
+ ip link add dev "$BRIDGE" type bridge
+ fi
+
+ local addr
+ ip link set dev "$BRIDGE" address "$BRIDGE_MAC" \
+ && for addr in $BRIDGE_ADDR $BRIDGE_ADDR_EXTRA; do
+ case "$addr" in
+ */*) ip addr add "$addr" dev "$BRIDGE";;
+ *) ip addr add "$addr/$BRIDGE_NETMASK" dev "$BRIDGE";;
+ esac
+ done \
+ && ip link set dev "$BRIDGE" up
+
+ echo 1 > /proc/sys/net/ipv4/ip_forward
+ echo 0 > "/proc/sys/net/ipv6/conf/$BRIDGE/accept_dad" || true
+
+ if [ -n "$BRIDGE_IPV6_ADDR" ] && [ -n "$BRIDGE_IPV6_MASK" ] && [ "$BRIDGE_IPV6_NETWORK" ]; then
+ echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
+ echo 0 > "/proc/sys/net/ipv6/conf/$BRIDGE/autoconf"
+
+ ip -6 addr add dev "$BRIDGE" "$BRIDGE_IPV6_ADDR/$BRIDGE_IPV6_MASK"
+
+ command_args="$command_args --dhcp-range=$BRIDGE_IPV6_ADDR,ra-only --listen-address $BRIDGE_IPV6_ADDR"
+ fi
+
+}
+
+start_pre() {
+ $command --test --conf-file="$cfgfile" >/dev/null 2>&1 \
+ || $command --test \
+ || return 1
+
+ checkpath -m 0644 -o "$user:$group" -f "$leasefile" || return 1
+
+ if [ -n "$BRIDGE" ]; then
+ setup_bridge
+ if ! yesno "$DISABLE_IPTABLES"; then
+ setup_firewall -I -A
+ fi
+ if yesno "$DNSMASQ_LISTEN_BRIDGE_ADDR"; then
+ local addr; for addr in $BRIDGE_ADDR; do
+ command_args="$command_args --listen-address ${addr%/*}"
+ done
+ fi
+ command_args="$command_args --strict-order --bind-interfaces --except-interface=lo --interface=$BRIDGE"
+ command_args="$command_args --dhcp-range $BRIDGE_DHCP_RANGE --dhcp-lease-max=$BRIDGE_DHCP_MAX --dhcp-no-override --dhcp-leasefile=$leasefile --dhcp-authoritative"
+ fi
+}
+
+stop_post() {
+ if [ -n "$BRIDGE" ]; then
+ local addr; for addr in $BRIDGE_ADDR $BRIDGE_ADDR_EXTRA; do
+ case "$addr" in
+ */*) ip addr del "$addr" dev "$BRIDGE";;
+ *) ip addr del "$addr/$BRIDGE_NETMASK" dev "$BRIDGE";;
+ esac
+ done
+ ip link set dev "$BRIDGE" down
+ if ! yesno "$DISABLE_IPTABLES"; then
+ setup_firewall -D -D
+ fi
+	# don't destroy the bridge if there are attached interfaces
+ ls /sys/class/net/"$BRIDGE"/brif/* > /dev/null 2>&1 || ip link delete "$BRIDGE"
+ fi
+}
+
+reload() {
+ ebegin "Reloading $RC_SVCNAME"
+
+ $command --test --conf-file="$cfgfile" >/dev/null 2>&1 \
+ || $command --test \
+ || return 1
+
+ if [ "$supervisor" ]; then
+ $supervisor "$RC_SVCNAME" --signal HUP
+ else
+ start-stop-daemon --signal HUP --pidfile "$pidfile"
+ fi
+ eend $?
+}
+
+checkconfig() {
+ ebegin "Checking $RC_SVCNAME configuration"
+
+ $command --test --conf-file="$cfgfile"
+
+ eend $?
+}
diff --git a/repo/dnsmasq/dnsmasq.pre-install b/repo/dnsmasq/dnsmasq.pre-install
new file mode 100644
index 0000000..708c15b
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq.pre-install
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+addgroup -S dnsmasq 2>/dev/null
+adduser -S -D -H -h /dev/null -s /sbin/nologin -G dnsmasq -g dnsmasq dnsmasq 2>/dev/null
+
+exit 0
diff --git a/repo/dnsmasq/dnsmasq.pre-upgrade b/repo/dnsmasq/dnsmasq.pre-upgrade
new file mode 100644
index 0000000..708c15b
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq.pre-upgrade
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+addgroup -S dnsmasq 2>/dev/null
+adduser -S -D -H -h /dev/null -s /sbin/nologin -G dnsmasq -g dnsmasq dnsmasq 2>/dev/null
+
+exit 0
diff --git a/repo/dnsmasq/dnsmasq.xibuild b/repo/dnsmasq/dnsmasq.xibuild
new file mode 100644
index 0000000..9865e79
--- /dev/null
+++ b/repo/dnsmasq/dnsmasq.xibuild
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+NAME="dnsmasq"
+DESC="A lightweight DNS, DHCP, RA, TFTP and PXE server"
+
+MAKEDEPS="linux-headers nettle"
+
+PKG_VER=2.86
+SOURCE="https://www.thekelleys.org.uk/dnsmasq/dnsmasq-$PKG_VER.tar.xz"
+
+ADDITIONAL="
+0000-fix-heap-overflow-in-dns-replies.patch
+0001-Retry-on-interrupted-error-in-tftp.patch
+0002-Add-safety-checks-to-places-pointed-by-Coverity.patch
+0003-Small-safeguard-to-unexpected-data.patch
+0004-Fix-bunch-of-warnings-in-auth.c.patch
+0005-Fix-few-coverity-warnings-in-lease-tools.patch
+0006-Fix-coverity-formats-issues-in-blockdata.patch
+0007-Retry-dhcp6-ping-on-interrupts.patch
+0008-Fix-coverity-warnings-on-dbus.patch
+0009-Address-coverity-issues-detected-in-util.c.patch
+0010-Fix-coverity-detected-issues-in-option.c.patch
+0011-Fix-coverity-detected-issue-in-radv.c.patch
+0012-Fix-coverity-detected-issues-in-cache.c.patch
+0013-Fix-coverity-issues-detected-in-domain-match.c.patch
+0014-Fix-coverity-detected-issues-in-dnsmasq.c.patch
+0015-Fix-coverity-issues-in-dnssec.c.patch
+0020-Fix-crash-after-re-reading-empty-resolv.conf.patch
+CVE-2022-0934.patch
+config.h.patch
+dnsmasq-dnssec.pre-install
+dnsmasq-dnssec.pre-upgrade
+dnsmasq.conf.patch
+dnsmasq.confd
+dnsmasq.initd
+dnsmasq.pre-install
+dnsmasq.pre-upgrade
+"
+
+prepare () {
+ apply_patches
+}
+
+build() {
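+	# build the DNSSEC-enabled binary first and stash it, then rebuild without DNSSEC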
+ make CFLAGS="$CFLAGS" COPTS="-DHAVE_DNSSEC" all
+ mv src/dnsmasq src/dnsmasq~dnssec
+
+ make CFLAGS="$CFLAGS" clean all
+}
+
+# dnsmasq doesn't provide any test suite (shame on them!), so just check that
+# the binary isn't totally broken...
+check() {
+ ./src/dnsmasq --help >/dev/null
+}
+
+package() {
+ provider_priority=100 # highest (other provider is dnsmasq-dnssec)
+
+ make PREFIX=/usr DESTDIR="$PKG_DEST" install
+
+ install -D -m755 "$BUILD_ROOT"/dnsmasq.initd "$PKG_DEST"/etc/init.d/dnsmasq
+ install -D -m644 "$BUILD_ROOT"/dnsmasq.confd "$PKG_DEST"/etc/conf.d/dnsmasq
+}
diff --git a/repo/docbook2x/01_fix_static_datadir_evaluation.patch b/repo/docbook2x/01_fix_static_datadir_evaluation.patch
new file mode 100644
index 0000000..5241dc3
--- /dev/null
+++ b/repo/docbook2x/01_fix_static_datadir_evaluation.patch
@@ -0,0 +1,19 @@
+Description:
+ 01_fix_static_datadir_evaluation.dpatch by Daniel Leidert (dale) <daniel.leidert@wgdd.de>
+ All lines beginning with `## DP:' are a description of the patch.
+ The evaluation of datadir results in "${prefix}/share" without
+ evaluation of the ${prefix} variable with autoconf 2.60.
+
+Index: docbook2X-0.8.8/configure.ac
+===================================================================
+--- docbook2X-0.8.8.orig/configure.ac
++++ docbook2X-0.8.8/configure.ac
+@@ -148,7 +148,7 @@
+ dnl they will reside and should use these static_* values.
+ dnl Ensure that all static_* are fully expanded.
+
+-eval static_datadir="$datadir"
++eval eval static_datadir="$datadir"
+
+ eval static_bindir="$bindir"
+ old_val=""
diff --git a/repo/docbook2x/02_fix_418703_dont_use_abbreviated_sfnet_address.patch b/repo/docbook2x/02_fix_418703_dont_use_abbreviated_sfnet_address.patch
new file mode 100644
index 0000000..681047a
--- /dev/null
+++ b/repo/docbook2x/02_fix_418703_dont_use_abbreviated_sfnet_address.patch
@@ -0,0 +1,27 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 02_fix_418703_dont_use_abbreviated_sfnet_address.dpatch by Daniel Leidert (dale) <daniel.leidert@wgdd.de>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Ondrej Certik reported a resolver issue: http://bugs.debian.org/418703.
+## DP: The error seems to be caused by using the abbreviated sf.net URLs. But
+## DP: it is possible, that this issue only occurs together with the issue
+## DP: described in 01_fix_static_datadir_evaluation.dpatch, because the path
+## DP: to the catalog also suffers from this issue.
+
+@DPATCH@
+diff -urNad docbook2x-0.8.8~/perl/db2x_xsltproc.pl docbook2x-0.8.8/perl/db2x_xsltproc.pl
+--- docbook2x-0.8.8~/perl/db2x_xsltproc.pl 2004-08-18 16:21:52.000000000 +0200
++++ docbook2x-0.8.8/perl/db2x_xsltproc.pl 2007-04-12 16:07:20.000000000 +0200
+@@ -110,10 +110,10 @@
+
+ if($options->{'stylesheet'} eq 'texi') {
+ $options->{'stylesheet'} =
+- "http://docbook2x.sf.net/latest/xslt/texi/docbook.xsl";
++ "http://docbook2x.sourceforge.net/latest/xslt/texi/docbook.xsl";
+ } elsif($options->{'stylesheet'} eq 'man') {
+ $options->{'stylesheet'} =
+- "http://docbook2x.sf.net/latest/xslt/man/docbook.xsl";
++ "http://docbook2x.sourceforge.net/latest/xslt/man/docbook.xsl";
+ }
+
+ if(scalar(@argv) != 1) {
diff --git a/repo/docbook2x/03_fix_420153_filename_whitespace_handling.patch b/repo/docbook2x/03_fix_420153_filename_whitespace_handling.patch
new file mode 100644
index 0000000..26cdf8e
--- /dev/null
+++ b/repo/docbook2x/03_fix_420153_filename_whitespace_handling.patch
@@ -0,0 +1,43 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 03_fix_420153_filename_whitespace_handling.dpatch by
+## Daniel Leidert (dale) <daniel.leidert@wgdd.de>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Peter Eisentraut reported a regression in the whitespace handling of
+## DP: refentrytitle content during filename creation:
+## DP: http://bugs.debian.org/420153. The problem is, that upstream first
+## DP: replaces all spaces (but not linebreaks btw) with underlines and then
+## DP: it tries to normalize the result. This means, that a linebreak with
+## DP: additional whitespaces results in manpage names like 'foo_ ____bar.9'.
+## DP: So what we basically do in this patch is, that we first normalize the
+## DP: refentrytitle and then replace any spaces left with underlines.
+
+@DPATCH@
+diff -urNad docbook2x-0.8.8~/xslt/man/manpage.xsl docbook2x-0.8.8/xslt/man/manpage.xsl
+--- docbook2x-0.8.8~/xslt/man/manpage.xsl 2006-04-20 15:45:55.000000000 +0200
++++ docbook2x-0.8.8/xslt/man/manpage.xsl 2007-04-20 16:19:28.000000000 +0200
+@@ -30,7 +30,7 @@
+
+ <xsl:template name="manpage-filename">
+ <xsl:param name="filename" />
+- <xsl:value-of select="normalize-space(translate($filename, &quot; /&quot;, &quot;__&quot;))" />
++ <xsl:value-of select="translate(normalize-space($filename), ' /', '__')" />
+ </xsl:template>
+
+
+diff -urNad docbook2x-0.8.8~/xslt/man/refentry.xsl docbook2x-0.8.8/xslt/man/refentry.xsl
+--- docbook2x-0.8.8~/xslt/man/refentry.xsl 2006-04-21 04:39:55.000000000 +0200
++++ docbook2x-0.8.8/xslt/man/refentry.xsl 2007-04-20 16:21:53.000000000 +0200
+@@ -38,7 +38,11 @@
+ <xsl:template name="refentry-filename">
+ <xsl:param name="title" />
+
+- <xsl:variable name="title2" select="translate($title, &quot; /&quot;, &quot;__&quot;)" />
++ <xsl:variable name="title2">
++ <xsl:call-template name="manpage-filename">
++ <xsl:with-param name="filename" select="$title" />
++ </xsl:call-template>
++ </xsl:variable>
+
+ <!-- not using gentext here since man page names tend not to have
+ accented chars / non-Latin chars ...
diff --git a/repo/docbook2x/04_fix_442782_preprocessor_declaration_syntax.patch b/repo/docbook2x/04_fix_442782_preprocessor_declaration_syntax.patch
new file mode 100644
index 0000000..b6ddbfa
--- /dev/null
+++ b/repo/docbook2x/04_fix_442782_preprocessor_declaration_syntax.patch
@@ -0,0 +1,90 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 04_fix_442782_preprocessor_declaration_syntax.dpatch by Colin Watson <cjwatson@debian.org>.
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Conventionally, preprocessor declarations should start with
+## DP: '\"
+## DP: rather than
+## DP: .\"
+## DP: Current man-db only supports the first (and recommended) syntax. So
+## DP: Colin Watson provided this patch to fix docbook2x.
+## DP:
+## DP: <URL:http://bugs.debian.org/442782>
+## DP: <URL:http://lists.gnu.org/archive/html/groff/2007-11/msg00023.html>
+
+@DPATCH@
+diff -urNad trunk~/perl/db2x_manxml.pl trunk/perl/db2x_manxml.pl
+--- trunk~/perl/db2x_manxml.pl 2006-04-22 17:21:32.000000000 +0200
++++ trunk/perl/db2x_manxml.pl 2007-11-24 01:27:37.000000000 +0100
+@@ -342,6 +342,25 @@
+ $self->{line_start} = 1;
+ }
+
++#
++# Print a comment in the output, without causing a break.
++# Params: comment - the comment text.
++# May use any characters; they need not be escaped.
++#
++sub comment_nobreak
++{
++ my ($self, $comment) = @_;
++ $self->write("\n") unless $self->{line_start};
++
++ foreach my $line (split(/\n/, $comment)) {
++ $self->write('\'\" ');
++ $self->write($line);
++ $self->write("\n");
++ }
++
++ $self->{line_start} = 1;
++}
++
+
+ #
+ # Use a roff "escape" i.e. commands embedded in text starting with \
+@@ -510,16 +529,20 @@
+
+ $self->{'adjust-stack'} = [ 'b' ];
+
+- $self->{rw}->comment($elem->attr('preprocessors'))
+- if($elem->attr('preprocessors') ne '');
+-
++ my $preprocessors = $elem->attr('preprocessors');
+ # I've dug through the Internet to see if there was any
+ # standard way to specify encoding with man pages.
+ # The following seems to be a reasonable proposal:
+ # <URL:http://mail.nl.linux.org/linux-utf8/2001-04/msg00168.html>
+ my $encoding = $self->{options}->{'encoding'};
+ $encoding =~ s#//TRANSLIT$##i;
+- $self->{rw}->comment("-*- coding: $encoding -*-");
++ $encoding = "-*- coding: $encoding -*-";
++ if ($preprocessors eq '') {
++ $preprocessors = $encoding;
++ } else {
++ $preprocessors = "$preprocessors $encoding";
++ }
++ $self->{rw}->comment_nobreak($preprocessors);
+
+ # Define escapes for switching to and from monospace fonts (groff only)
+ $self->{rw}->request(qw{ .if \n(.g .ds T< \\\\FC});
+diff -urNad trunk~/xslt/backend/db2x_manxml.xsl trunk/xslt/backend/db2x_manxml.xsl
+--- trunk~/xslt/backend/db2x_manxml.xsl 2006-04-23 16:44:52.000000000 +0200
++++ trunk/xslt/backend/db2x_manxml.xsl 2007-11-24 01:27:37.000000000 +0100
+@@ -528,7 +528,7 @@
+ <exslt:document method="text"
+ encoding="{$encoding}"
+ href="{$path}">
+- <xsl:text>.\" -*- coding: </xsl:text>
++ <xsl:text>'\" -*- coding: </xsl:text>
+ <xsl:value-of select="$encoding" />
+ <xsl:text> -*-&#10;</xsl:text>
+ <xsl:copy-of select="$content" />
+@@ -538,7 +538,7 @@
+ <saxon:output method="text"
+ encoding="{$encoding}"
+ href="{$path}">
+- <xsl:text>.\" -*- coding: </xsl:text>
++ <xsl:text>'\" -*- coding: </xsl:text>
+ <xsl:value-of select="$encoding" />
+ <xsl:text> -*-&#10;</xsl:text>
+ <xsl:copy-of select="$content" />
diff --git a/repo/docbook2x/05_fix_439214_error_on_missing_refentry.patch b/repo/docbook2x/05_fix_439214_error_on_missing_refentry.patch
new file mode 100644
index 0000000..b8a493b
--- /dev/null
+++ b/repo/docbook2x/05_fix_439214_error_on_missing_refentry.patch
@@ -0,0 +1,33 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 05_fix_439214_error_on_missing_refentry.dpatch by Daniel Leidert <daniel.leidert@wgdd.de>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: If there is no refentry element, the manpage stylesheets silently
+## DP: "ignores" this. It has been requested to print at least a
+## DP: warning.
+## DP:
+## DP: <URL:http://bugs.debian.org/439214>
+## DP: <URL:http://lists.gnu.org/archive/html/groff/2007-11/msg00023.html>
+
+@DPATCH@
+diff -urNad trunk~/xslt/man/docbook.xsl trunk/xslt/man/docbook.xsl
+--- trunk~/xslt/man/docbook.xsl 2006-04-11 21:00:19.000000000 +0200
++++ trunk/xslt/man/docbook.xsl 2008-02-05 03:31:48.000000000 +0100
+@@ -111,11 +111,15 @@
+ <xsl:when test="child::refentry">
+ <xsl:apply-templates />
+ </xsl:when>
+-
+- <xsl:otherwise>
++ <xsl:when test="descendant-or-self::refentry">
+ <manpageset>
+ <xsl:apply-templates select="descendant-or-self::refentry" />
+ </manpageset>
++ </xsl:when>
++ <xsl:otherwise>
++ <xsl:message terminate="no">
++ <xsl:text>WARNING: Sorry, but I cannot find a refentry element in your source!</xsl:text>
++ </xsl:message>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:template>
diff --git a/repo/docbook2x/06_fix_man_typo.patch b/repo/docbook2x/06_fix_man_typo.patch
new file mode 100644
index 0000000..5be21bd
--- /dev/null
+++ b/repo/docbook2x/06_fix_man_typo.patch
@@ -0,0 +1,24 @@
+Author: Gianfranco Costamagna <costamagnagianfranco@yahoo.it>
+
+--- docbook2x-0.8.8.orig/doc/docbook2man.1
++++ docbook2x-0.8.8/doc/docbook2man.1
+@@ -187,7 +187,7 @@ parameter instead.
+
+ However, inside a custom stylesheet
+ (\fInot on the command-line\fR)
+-this paramter can be set to the XPath expression
++this parameter can be set to the XPath expression
+ \*(T<document('')\*(T>,
+ which will cause the custom translations
+ directly embedded inside the custom stylesheet to be read.
+--- docbook2x-0.8.8.orig/doc/docbook2texi.1
++++ docbook2x-0.8.8/doc/docbook2texi.1
+@@ -230,7 +230,7 @@ parameter instead.
+
+ However, inside a custom stylesheet
+ (\fInot on the command-line\fR)
+-this paramter can be set to the XPath expression
++this parameter can be set to the XPath expression
+ \*(T<document('')\*(T>,
+ which will cause the custom translations
+ directly embedded inside the custom stylesheet to be read.
diff --git a/repo/docbook2x/docbook2x.xibuild b/repo/docbook2x/docbook2x.xibuild
new file mode 100644
index 0000000..22a1f11
--- /dev/null
+++ b/repo/docbook2x/docbook2x.xibuild
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+NAME="docbook2x"
+DESC="DocBook converter to UNIX manpage and GNU Texinfo format"
+
+MAKEDEPS="autoconf automake perl-xml-sax"
+
+PKG_VER=0.8.8
+SOURCE="https://downloads.sourceforge.net/docbook2x/docbook2X-$PKG_VER.tar.gz"
+
+ADDITIONAL="
+01_fix_static_datadir_evaluation.patch
+02_fix_418703_dont_use_abbreviated_sfnet_address.patch
+03_fix_420153_filename_whitespace_handling.patch
+04_fix_442782_preprocessor_declaration_syntax.patch
+05_fix_439214_error_on_missing_refentry.patch
+06_fix_man_typo.patch
+"
+
+prepare() {
+ apply_patches
+ autoreconf --install
+}
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --localstatedir=/var \
+        --program-transform-name 's/docbook2/docbook2x-/'
+    make
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install || return 1
+}
+
diff --git a/repo/electrum/0001-apk-add-instead-of-apt-get-install.patch b/repo/electrum/0001-apk-add-instead-of-apt-get-install.patch
new file mode 100644
index 0000000..29d2201
--- /dev/null
+++ b/repo/electrum/0001-apk-add-instead-of-apt-get-install.patch
@@ -0,0 +1,22 @@
+From 46e1404ddad66422bd2106ff6e6954741191826e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Adamski?= <michal@ert.pl>
+Date: Wed, 14 Oct 2020 11:06:33 +0200
+Subject: [PATCH] `apk add` instead of `apt-get install`
+
+
+diff --git a/electrum/gui/qt/__init__.py b/electrum/gui/qt/__init__.py
+index 1e1d2831f..b7fe2c42d 100644
+--- a/electrum/gui/qt/__init__.py
++++ b/electrum/gui/qt/__init__.py
+@@ -34,7 +34,7 @@ from typing import Optional, TYPE_CHECKING, List
+ try:
+ import PyQt5
+ except Exception:
+- sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
++ sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apk add py3-qt5'")
+
+ from PyQt5.QtGui import QGuiApplication
+ from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
+--
+2.25.1
+
diff --git a/repo/electrum/electrum.xibuild b/repo/electrum/electrum.xibuild
new file mode 100644
index 0000000..b4e2a07
--- /dev/null
+++ b/repo/electrum/electrum.xibuild
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+NAME="electrum"
+DESC="Lightweight Bitcoin Wallet"
+
+MAKEDEPS="python python-pytest python-pyside2 python-mock"
+
+PKG_VER=4.1.5
+SOURCE="
+ https://download.electrum.org/$PKG_VER/Electrum-$PKG_VER.tar.gz
+ "
+
+ADDITIONAL="
+0001-apk-add-instead-of-apt-get-install.patch
+"
+prepare () {
+ apply_patches
+}
+
+build() {
+ python3 setup.py build
+}
+
+check() {
+ CI=1 python3 -m pytest
+}
+
+package() {
+ python3 setup.py install --prefix=/usr --root="$PKG_DEST"
+    rm -r "${PKG_DEST:?}"/home
+}
diff --git a/repo/freeciv/freeciv.xibuild b/repo/freeciv/freeciv.xibuild
new file mode 100644
index 0000000..2ef99f5
--- /dev/null
+++ b/repo/freeciv/freeciv.xibuild
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+NAME="freeciv"
+DESC="Free and Open Source empire-building strategy game (meta package)"
+
+MAKEDEPS="curl readline gettext sdl2-mixer gzip pkg-conf glib atk pango gdk-pixbuf gtk3 qtchooser qt5-qtbase gzip sdl2 sdl2-image sdl2-ttf sdl2-gfx sqlite3"
+
+PKG_VER=3.0.0
+SOURCE="https://downloads.sourceforge.net/sourceforge/freeciv/freeciv-$PKG_VER.tar.xz"
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --enable-client=gtk3.22,qt,sdl2 \
+ --enable-debug=no \
+ --enable-fcmp=gtk3,qt,cli \
+ --enable-fcdb=sqlite3 \
+ --enable-ipv6=yes \
+ --with-readline
+ make
+}
+
+check() {
+ make check
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+}
+
diff --git a/repo/glfw/glfw.xibuild b/repo/glfw/glfw.xibuild
new file mode 100644
index 0000000..ad49e63
--- /dev/null
+++ b/repo/glfw/glfw.xibuild
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+NAME="glfw"
+DESC="Multi-platform library for OpenGL and Vulkan application development"
+
+MAKEDEPS="libxinerama linux-headers mesa cmake ninja libx11 libxcursor libxrandr libxi"
+
+PKG_VER=3.3.7
+SOURCE="https://github.com/glfw/glfw/releases/download/$PKG_VER/glfw-$PKG_VER.zip"
+
+build() {
+ cmake -G Ninja -B build \
+ -DCMAKE_BUILD_TYPE=MinSizeRel \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DCMAKE_INSTALL_LIBDIR=lib \
+ -DBUILD_SHARED_LIBS=ON
+ cmake --build build
+}
+
+package() {
+ DESTDIR="$PKG_DEST" cmake --install build
+}
+
diff --git a/repo/i2c-tools/i2c-tools.xibuild b/repo/i2c-tools/i2c-tools.xibuild
new file mode 100644
index 0000000..dd907af
--- /dev/null
+++ b/repo/i2c-tools/i2c-tools.xibuild
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+NAME="i2c-tools"
+DESC="Tools for monitoring I2C devices"
+
+MAKEDEPS="linux-headers python"
+
+PKG_VER=4.3
+SOURCE="https://fossies.org/linux/misc/i2c-tools-$PKG_VER.tar.gz"
+
+prepare() {
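+    # point the Makefile install paths at the staging directory and the /usr prefix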
+ sed -e "s|^DESTDIR.*|DESTDIR = \"$PKG_DEST\"|" \
+ -e "s|^prefix.*|prefix = /usr|" \
+ -e "s|^PREFIX.*|PREFIX = /usr|" \
+ -i Makefile
+}
+
+build() {
+ make
+
+ cd "$BUILD_ROOT"/py-smbus
+ CFLAGS="$CFLAGS -I$BUILD_ROOT/include" python3 setup.py build
+}
+
+package() {
+ make install
+
+ cd "$BUILD_ROOT"/py-smbus
+ python3 setup.py install --prefix=/usr --root="$PKG_DEST"
+}
+
diff --git a/repo/iptables/ebtables.confd b/repo/iptables/ebtables.confd
new file mode 100644
index 0000000..0b48cb4
--- /dev/null
+++ b/repo/iptables/ebtables.confd
@@ -0,0 +1,15 @@
+# /etc/conf.d/ebtables
+
+# Location in which ebtables initscript will save set rules on
+# service shutdown
+EBTABLES_SAVE="/var/lib/ebtables/rules-save"
+
+# Options to pass to ebtables-save and ebtables-restore
+SAVE_RESTORE_OPTIONS=""
+
+# Save state on stopping ebtables
+SAVE_ON_STOP="yes"
+
+# Tables to be saved and restored. If you have built ebtables as modules, you
+# may leave it blank. Otherwise, you MUST define which tables to control.
+TABLE_NAMES="filter nat"
diff --git a/repo/iptables/ebtables.initd b/repo/iptables/ebtables.initd
new file mode 100644
index 0000000..7d92436
--- /dev/null
+++ b/repo/iptables/ebtables.initd
@@ -0,0 +1,99 @@
+#!/sbin/openrc-run
+# Copyright 1999-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/net-firewall/ebtables/files/ebtables.initd,v 1.2 2007/09/28 19:22:14 pva Exp $
+
+extra_commands="save reload"
+extra_started_commands="panic"
+
+ebtables_bin="/sbin/ebtables"
+ebtables_save=${EBTABLES_SAVE}
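+# detect ebtables tables loaded as kernel modules; fall back to TABLE_NAMES if none are found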
+ebtables_tables=$(grep -E '^ebtable_' /proc/modules | cut -f1 -d' ' | sed s/ebtable_//)
+if [ "$ebtables_tables" == "" ] ; then
+ ebtables_tables=${TABLE_NAMES}
+fi
+
+depend() {
+ before net
+ use logger
+}
+
+set_table_policy() {
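+	# set policy $2 on every built-in chain of ebtables table $1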
+ local chains table=$1 policy=$2
+ case ${table} in
+ nat) chains="PREROUTING POSTROUTING OUTPUT";;
+ broute) chains="BROUTING";;
+ filter) chains="INPUT FORWARD OUTPUT";;
+ *) chains="";;
+ esac
+ local chain
+ for chain in ${chains} ; do
+ ${ebtables_bin} -t ${table} -P ${chain} ${policy}
+ done
+}
+
+checkconfig() {
+ if [ ! -f ${ebtables_save} ] ; then
+ eerror "Not starting ebtables. First create some rules then run:"
+ eerror "/etc/init.d/ebtables save"
+ return 1
+ fi
+ return 0
+}
+
+start() {
+ checkconfig || return 1
+ ebegin "Loading ebtables state and starting bridge firewall"
+ ${ebtables_bin}-restore ${SAVE_RESTORE_OPTIONS} < "${ebtables_save}"
+ eend $?
+}
+
+stop() {
+ if [ "${SAVE_ON_STOP}" = "yes" ] ; then
+ save || return 1
+ fi
+ ebegin "Stopping bridge firewall"
+ local a
+ for a in ${ebtables_tables}; do
+ set_table_policy $a ACCEPT
+
+ ${ebtables_bin} -t $a -F
+ ${ebtables_bin} -t $a -X
+ done
+ eend $?
+}
+
+reload() {
+ ebegin "Flushing bridge firewall"
+ local a
+ for a in ${ebtables_tables}; do
+ ${ebtables_bin} -t $a -F
+ ${ebtables_bin} -t $a -X
+ done
+ eend $?
+
+ start
+}
+
+save() {
+ ebegin "Saving ebtables state"
+ checkpath -Fm 0600 "${ebtables_save}"
+ for a in ${ebtables_tables} ; do
+ ${ebtables_bin}-save -t ${a} ${SAVE_RESTORE_OPTIONS} >> "${ebtables_save}"
+ done
+ eend $?
+}
+
+panic() {
+ service_started ebtables && svc_stop
+
+ local a
+ ebegin "Dropping all packets forwarded on bridges"
+ for a in ${ebtables_tables}; do
+ ${ebtables_bin} -t $a -F
+ ${ebtables_bin} -t $a -X
+
+ set_table_policy $a DROP
+ done
+ eend $?
+}
diff --git a/repo/iptables/ip6tables.confd b/repo/iptables/ip6tables.confd
new file mode 100644
index 0000000..1fa63f3
--- /dev/null
+++ b/repo/iptables/ip6tables.confd
@@ -0,0 +1,14 @@
+# /etc/conf.d/ip6tables
+
+# Location in which ip6tables initscript will save set rules on
+# service shutdown
+IP6TABLES_SAVE="/etc/iptables/rules6-save"
+
+# Options to pass to ip6tables-save and ip6tables-restore
+SAVE_RESTORE_OPTIONS="-c"
+
+# Save state on stopping ip6tables
+SAVE_ON_STOP="yes"
+
+# Enable/disable IPv6 forwarding with the rules
+IPFORWARD="no"
diff --git a/repo/iptables/iptables.confd b/repo/iptables/iptables.confd
new file mode 100644
index 0000000..c9e5a68
--- /dev/null
+++ b/repo/iptables/iptables.confd
@@ -0,0 +1,14 @@
+# /etc/conf.d/iptables
+
+# Location in which iptables initscript will save set rules on
+# service shutdown
+IPTABLES_SAVE="/etc/iptables/rules-save"
+
+# Options to pass to iptables-save and iptables-restore
+SAVE_RESTORE_OPTIONS="-c"
+
+# Save state on stopping iptables
+SAVE_ON_STOP="yes"
+
+# Enable/disable IPv4 forwarding with the rules
+IPFORWARD="no"
diff --git a/repo/iptables/iptables.initd b/repo/iptables/iptables.initd
new file mode 100644
index 0000000..0f906ee
--- /dev/null
+++ b/repo/iptables/iptables.initd
@@ -0,0 +1,135 @@
+#!/sbin/openrc-run
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/net-firewall/iptables/files/iptables-1.4.11.init,v 1.2 2011/12/04 10:15:59 swegener Exp $
+
+description="IPv4/IPv6 packet filtering and NAT"
+description_save="Save firewall state"
+description_panic="Drop all packets"
+description_reload="Reload configuration"
+
+extra_commands="save panic"
+extra_started_commands="reload"
+
+iptables_name=${SVCNAME}
+if [ "${iptables_name}" != "iptables" -a "${iptables_name}" != "ip6tables" ] ; then
+ iptables_name="iptables"
+fi
+
+iptables_bin="/sbin/${iptables_name}"
+case ${iptables_name} in
+ iptables) iptables_proc="/proc/net/ip_tables_names"
+ iptables_save=${IPTABLES_SAVE}
+ sysctl_ipfwd=net.ipv4.ip_forward;;
+ ip6tables) iptables_proc="/proc/net/ip6_tables_names"
+ iptables_save=${IP6TABLES_SAVE}
+ sysctl_ipfwd=net.ipv6.conf.all.forwarding;;
+esac
+
+depend() {
+ before net
+ after sysctl
+ use logger
+ provide firewall
+}
+
+set_table_policy() {
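+	# set policy $2 on every built-in chain of table $1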
+ local chains table=$1 policy=$2
+ case ${table} in
+ nat) chains="PREROUTING POSTROUTING OUTPUT";;
+ mangle) chains="PREROUTING INPUT FORWARD OUTPUT POSTROUTING";;
+ filter) chains="INPUT FORWARD OUTPUT";;
+ *) chains="";;
+ esac
+ local chain
+ for chain in ${chains} ; do
+ ${iptables_bin} -w 5 -t ${table} -P ${chain} ${policy}
+ done
+}
+
+checkkernel() {
+ if [ ! -e ${iptables_proc} ] ; then
+ eerror "Your kernel lacks ${iptables_name} support, please load"
+ eerror "appropriate modules and try again."
+ return 1
+ fi
+ return 0
+}
+checkconfig() {
+ if [ ! -f ${iptables_save} ] ; then
+ eerror "Not starting ${iptables_name}. First create some rules then run:"
+ eerror "/etc/init.d/${iptables_name} save"
+ return 1
+ fi
+ return 0
+}
+
+start() {
+ checkconfig || return 1
+ ebegin "Loading ${iptables_name} state and starting firewall"
+ ${iptables_bin}-restore ${SAVE_RESTORE_OPTIONS} < "${iptables_save}"
+ eend $?
+ if yesno "${IPFORWARD}"; then
+ ebegin "Enabling forwarding"
+ /sbin/sysctl -w ${sysctl_ipfwd}=1 > /dev/null
+ eend $?
+ fi
+}
+
+stop() {
+ if yesno "${IPFORWARD}"; then
+ ebegin "Disabling forwarding"
+ /sbin/sysctl -w ${sysctl_ipfwd}=0 > /dev/null
+ eend $?
+ fi
+ if yesno "${SAVE_ON_STOP}"; then
+ save || return 1
+ fi
+ checkkernel || return 1
+ ebegin "Stopping firewall"
+ local a
+ for a in $(cat ${iptables_proc}) ; do
+ set_table_policy $a ACCEPT
+
+ ${iptables_bin} -w 5 -F -t $a
+ ${iptables_bin} -w 5 -X -t $a
+ done
+ eend $?
+}
+
+reload() {
+ checkkernel || return 1
+ ebegin "Flushing firewall"
+ local a
+ for a in $(cat ${iptables_proc}) ; do
+ ${iptables_bin} -w 5 -F -t $a
+ ${iptables_bin} -w 5 -X -t $a
+ done
+ eend $?
+
+ start
+}
+
+save() {
+ ebegin "Saving ${iptables_name} state"
+ checkpath -fm 0600 "${iptables_save}"
+ ${iptables_bin}-save ${SAVE_RESTORE_OPTIONS} > "${iptables_save}"
+ eend $?
+}
+
+panic() {
+ checkkernel || return 1
+ if service_started ${iptables_name}; then
+ rc-service ${iptables_name} stop
+ fi
+
+ local a
+ ebegin "Dropping all packets"
+ for a in $(cat ${iptables_proc}) ; do
+ ${iptables_bin} -w 5 -F -t $a
+ ${iptables_bin} -w 5 -X -t $a
+
+ set_table_policy $a DROP
+ done
+ eend $?
+}
diff --git a/repo/iptables/iptables.xibuild b/repo/iptables/iptables.xibuild
new file mode 100644
index 0000000..8d8cead
--- /dev/null
+++ b/repo/iptables/iptables.xibuild
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+NAME="iptables"
+DESC="Linux kernel firewall, NAT and packet mangling tools"
+
+MAKEDEPS=" linux-headers libnftnl bison flex autoconf automake"
+
+PKG_VER=1.8.7
+SOURCE="https://www.netfilter.org/projects/iptables/files/iptables-$PKG_VER.tar.bz2"
+
+ADDITIONAL="
+ebtables.confd
+ebtables.initd
+ip6tables.confd
+iptables.confd
+iptables.initd
+use-sh-iptables-apply.patch
+"
+
+prepare () {
+ apply_patches
+}
+
+build() {
+ export CFLAGS="$CFLAGS -D_GNU_SOURCE"
+ ./configure \
+ --prefix=/usr \
+ --mandir=/usr/share/man \
+ --sbindir=/sbin \
+ --sysconfdir=/etc \
+ --without-kernel \
+ --enable-devel \
+ --enable-libipq \
+ --enable-shared
+
+ # do not use rpath
+ sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
+ sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
+
+ make
+}
+
+package() {
+ make -j1 install DESTDIR="$PKG_DEST"
+
+ mkdir -p "$PKG_DEST"/usr/include/libiptc \
+ "$PKG_DEST"/usr/lib \
+ "$PKG_DEST"/var/lib/iptables \
+ "$PKG_DEST"/etc/iptables
+
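+	# ship the development headers that make install does not copy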
+ install -m644 include/iptables.h include/ip6tables.h \
+ "$PKG_DEST"/usr/include/
+ install include/libiptc/*.h "$PKG_DEST"/usr/include/libiptc/
+
+ install -D -m755 "$BUILD_ROOT"/iptables.initd "$PKG_DEST"/etc/init.d/iptables
+ install -D -m644 "$BUILD_ROOT"/iptables.confd "$PKG_DEST"/etc/conf.d/iptables
+ install -D -m755 "$BUILD_ROOT"/ebtables.initd "$PKG_DEST"/etc/init.d/ebtables
+ install -D -m644 "$BUILD_ROOT"/ebtables.confd "$PKG_DEST"/etc/conf.d/ebtables
+}
diff --git a/repo/iptables/use-sh-iptables-apply.patch b/repo/iptables/use-sh-iptables-apply.patch
new file mode 100644
index 0000000..b31fc94
--- /dev/null
+++ b/repo/iptables/use-sh-iptables-apply.patch
@@ -0,0 +1,39 @@
+From: Simon Frankenberger <simon-alpine@fraho.eu>
+
+make iptables-apply use posix sh
+
+--- a/iptables/iptables-apply
++++ b/iptables/iptables-apply
+@@ -1,4 +1,4 @@
+-#!/bin/bash
++#!/bin/sh
+ # iptables-apply -- a safer way to update iptables remotely
+ #
+ # Usage:
+@@ -110,7 +110,7 @@
+ }
+
+ function checkcommands() {
+- for cmd in "${COMMANDS[@]}"; do
++ for cmd in ${COMMANDS}; do
+ if ! command -v "$cmd" >/dev/null; then
+ echo "Error: needed command not found: $cmd" >&2
+ exit 127
+@@ -184,7 +184,7 @@
+ fi
+
+ # Needed commands
+- COMMANDS=(mktemp "$SAVE" "$RESTORE" "$RUNCMD")
++ COMMANDS="mktemp $SAVE $RESTORE $RUNCMD"
+ checkcommands
+ ;;
+ (*)
+@@ -196,7 +196,7 @@
+ fi
+
+ # Needed commands
+- COMMANDS=(mktemp "$SAVE" "$RESTORE")
++ COMMANDS="mktemp $SAVE $RESTORE"
+ checkcommands
+ ;;
+ esac
diff --git a/repo/lf/lf.xibuild b/repo/lf/lf.xibuild
new file mode 100644
index 0000000..192070a
--- /dev/null
+++ b/repo/lf/lf.xibuild
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+NAME="lf"
+DESC="Terminal filemanager written in Go with vim-style keybindings"
+
+MAKEDEPS="go"
+
+PKG_VER=26
+SOURCE="https://github.com/gokcehan/lf/archive/r$PKG_VER.tar.gz"
+
+build() {
+ go build -v -o bin/lf
+}
+
+check() {
+ go test ./...
+}
+
+package() {
+	install -Dm0755 bin/lf "$PKG_DEST"/usr/bin/lf
+
+ # Manpages
+ install -Dm0644 lf.1 "$PKG_DEST"/usr/share/man/man1/lf.1
+
+ # .desktop file for menus
+	install -Dm0644 lf.desktop "$PKG_DEST"/usr/share/applications/lf.desktop
+
+ # Shell completions
+ install -Dm0644 etc/lf.bash "$PKG_DEST"/usr/share/bash-completion/completions/lf
+ install -Dm0644 etc/lf.zsh "$PKG_DEST"/usr/share/zsh/site-functions/_lf
+	install -Dm0644 etc/lf.fish "$PKG_DEST"/usr/share/fish/completions/lf.fish
+}
+
diff --git a/repo/libmnl/libmnl.xibuild b/repo/libmnl/libmnl.xibuild
new file mode 100644
index 0000000..4a48350
--- /dev/null
+++ b/repo/libmnl/libmnl.xibuild
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+NAME="libmnl"
+DESC="Library for minimalistic netlink"
+
+MAKEDEPS=" linux-headers musl-legacy-compat"
+
+PKG_VER=1.0.5
+SOURCE="https://www.netfilter.org/projects/libmnl/files/libmnl-$PKG_VER.tar.bz2"
+
+ADDITIONAL="
+musl-fix-headers.patch
+"
+
+prepare () {
+ apply_patches
+ sed -i 's/--no-dereference --preserve=links,mode,timestamps/-p/g' doxygen/Makefile.in
+}
+
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --enable-static \
+        --without-doxygen
+    make
+}
+
+check() {
+ make check
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+}
+
diff --git a/repo/libmnl/musl-fix-headers.patch b/repo/libmnl/musl-fix-headers.patch
new file mode 100644
index 0000000..f3338b1
--- /dev/null
+++ b/repo/libmnl/musl-fix-headers.patch
@@ -0,0 +1,13 @@
+diff --git a/examples/netfilter/nfct-daemon.c b/examples/netfilter/nfct-daemon.c
+index a97c2ec..e3bb17a 100644
+--- a/examples/netfilter/nfct-daemon.c
++++ b/examples/netfilter/nfct-daemon.c
+@@ -20,6 +20,8 @@
+ #include <linux/netfilter/nfnetlink_conntrack.h>
+
+ #include <sys/queue.h>
++#include <sys/time.h>
++#include <sys/select.h>
+
+ struct nstats {
+ LIST_ENTRY(nstats) list;
diff --git a/repo/libnftnl/libnftnl.xibuild b/repo/libnftnl/libnftnl.xibuild
new file mode 100644
index 0000000..5f36744
--- /dev/null
+++ b/repo/libnftnl/libnftnl.xibuild
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+NAME="libnftnl"
+DESC="Netfilter library providing interface to the nf_tables subsystem"
+
+MAKEDEPS=" bash jansson libmnl"
+
+PKG_VER=1.2.1
+SOURCE="https://netfilter.org/projects/libnftnl/files/libnftnl-$PKG_VER.tar.bz2"
+
+build() {
+ cd "$BUILD_ROOT"
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --mandir=/usr/share/man \
+ --localstatedir=/var \
+ --enable-static
+ make
+}
+
+check() {
+ cd "$BUILD_ROOT"/tests
+ make check
+}
+
+package() {
+ cd "$BUILD_ROOT"
+ make DESTDIR="$PKG_DEST" install
+}
+
diff --git a/repo/libsecp256k1/libsecp256k1.xibuild b/repo/libsecp256k1/libsecp256k1.xibuild
new file mode 100644
index 0000000..d9296bf
--- /dev/null
+++ b/repo/libsecp256k1/libsecp256k1.xibuild
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+NAME="libsecp256k1"
+DESC="Optimized C library for EC operations on curve secp256k1"
+MAKEDEPS=" autoconf automake libtool"
+
+SOURCE="https://github.com/bitcoin-core/secp256k1.git"
+
+prepare() {
+ ./autogen.sh
+}
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --disable-static \
+ --enable-module-ecdh \
+ --enable-module-recovery
+ make
+}
+
+check() {
+ make check
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+}
diff --git a/repo/libtheora/automake.patch b/repo/libtheora/automake.patch
new file mode 100644
index 0000000..f004970
--- /dev/null
+++ b/repo/libtheora/automake.patch
@@ -0,0 +1,11 @@
+--- ./configure.ac.orig 2012-12-31 20:15:29.384600257 +0000
++++ ./configure.ac 2012-12-31 20:15:38.461347599 +0000
+@@ -9,7 +9,7 @@
+ AC_CANONICAL_HOST
+ AC_CANONICAL_TARGET
+
+-AM_CONFIG_HEADER([config.h])
++AC_CONFIG_HEADER([config.h])
+ AC_CONFIG_SRCDIR([lib/fdct.c])
+ AM_INIT_AUTOMAKE
+ AM_MAINTAINER_MODE
diff --git a/repo/libtheora/enc.patch b/repo/libtheora/enc.patch
new file mode 100644
index 0000000..ea604d2
--- /dev/null
+++ b/repo/libtheora/enc.patch
@@ -0,0 +1,11 @@
+--- ./lib/Makefile.am.orig
++++ ./lib/Makefile.am
+@@ -152,7 +154,7 @@
+ Version_script-enc theoraenc.exp
+ libtheoraenc_la_LDFLAGS = \
+ -version-info @THENC_LIB_CURRENT@:@THENC_LIB_REVISION@:@THENC_LIB_AGE@ \
+- @THEORAENC_LDFLAGS@ $(OGG_LIBS)
++ -ltheoradec @THEORAENC_LDFLAGS@ $(OGG_LIBS)
+
+ libtheora_la_SOURCES = \
+ $(decoder_sources) \
diff --git a/repo/libtheora/fix-mmx.patch b/repo/libtheora/fix-mmx.patch
new file mode 100644
index 0000000..63fb9f7
--- /dev/null
+++ b/repo/libtheora/fix-mmx.patch
@@ -0,0 +1,31 @@
+http://bugs.alpinelinux.org/issues/6132
+https://trac.xiph.org/ticket/2287
+
+patch rebased for libtheory 1.1.1 stable
+
+diff -ru libtheora-1.1.1.orig/lib/encode.c libtheora-1.1.1/lib/encode.c
+--- libtheora-1.1.1.orig/lib/encode.c 2009-08-22 18:14:04.000000000 +0000
++++ libtheora-1.1.1/lib/encode.c 2016-09-15 05:27:02.065785527 +0000
+@@ -864,6 +864,9 @@
+ }
+
+ static void oc_enc_frame_pack(oc_enc_ctx *_enc){
++ /*musl libc malloc()/realloc() calls might use floating point, so make sure
++ we've cleared the MMX state for them.*/
++ oc_restore_fpu(&_enc->state);
+ oggpackB_reset(&_enc->opb);
+ /*Only proceed if we have some coded blocks.
+ If there are no coded blocks, we can drop this frame simply by emitting a
+diff -ru libtheora-1.1.1.orig/lib/decode.c libtheora-1.1.1/lib/decode.c
+--- libtheora-1.1.1.orig/lib/decode.c 2009-09-26 20:55:21.000000000 +0000
++++ libtheora-1.1.1/lib/decode.c 2016-09-15 05:29:45.912196850 +0000
+@@ -1181,6 +1181,9 @@
+
+
+ static int oc_dec_postprocess_init(oc_dec_ctx *_dec){
++ /*musl libc malloc()/realloc() calls might use floating point, so make sure
++ we've cleared the MMX state for them.*/
++ oc_restore_fpu(&_dec->state);
+ /*pp_level 0: disabled; free any memory used and return*/
+ if(_dec->pp_level<=OC_PP_LEVEL_DISABLED){
+ if(_dec->dc_qis!=NULL){
diff --git a/repo/libtheora/fix-timeb.patch b/repo/libtheora/fix-timeb.patch
new file mode 100644
index 0000000..5344c07
--- /dev/null
+++ b/repo/libtheora/fix-timeb.patch
@@ -0,0 +1,75 @@
+--- libtheora-1.1.1.orig/examples/dump_psnr.c
++++ libtheora-1.1.1/examples/dump_psnr.c
+@@ -37,7 +37,6 @@
+ #endif
+ #include <stdlib.h>
+ #include <string.h>
+-#include <sys/timeb.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ /*Yes, yes, we're going to hell.*/
+--- libtheora-1.1.1.orig/examples/dump_video.c
++++ libtheora-1.1.1/examples/dump_video.c
+@@ -37,7 +37,7 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+-#include <sys/timeb.h>
++#include <sys/time.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ /*Yes, yes, we're going to hell.*/
+@@ -205,9 +205,9 @@
+ int long_option_index;
+ int c;
+
+- struct timeb start;
+- struct timeb after;
+- struct timeb last;
++ struct timeval start;
++ struct timeval after;
++ struct timeval last;
+ int fps_only=0;
+ int frames = 0;
+
+@@ -418,8 +418,8 @@
+ }
+
+ if(fps_only){
+- ftime(&start);
+- ftime(&last);
++ gettimeofday(&start, NULL);
++ gettimeofday(&last, NULL);
+ }
+
+ while(!got_sigint){
+@@ -433,7 +433,7 @@
+ videobuf_ready=1;
+ frames++;
+ if(fps_only)
+- ftime(&after);
++ gettimeofday(&after, NULL);
+ }
+
+ }else
+@@ -442,16 +442,16 @@
+
+ if(fps_only && (videobuf_ready || fps_only==2)){
+ long ms =
+- after.time*1000.+after.millitm-
+- (last.time*1000.+last.millitm);
++ after.tv_sec*1000.+after.tv_usec/1000-
++ (last.tv_sec*1000.+last.tv_usec/1000);
+
+ if(ms>500 || fps_only==1 ||
+ (feof(infile) && !videobuf_ready)){
+ float file_fps = (float)ti.fps_numerator/ti.fps_denominator;
+ fps_only=2;
+
+- ms = after.time*1000.+after.millitm-
+- (start.time*1000.+start.millitm);
++ ms = after.tv_sec*1000.+after.tv_usec/1000-
++ (start.tv_sec*1000.+start.tv_usec/1000);
+
+ fprintf(stderr,"\rframe:%d rate:%.2fx ",
+ frames,
diff --git a/repo/libtheora/libtheora-flags.patch b/repo/libtheora/libtheora-flags.patch
new file mode 100644
index 0000000..a47cc57
--- /dev/null
+++ b/repo/libtheora/libtheora-flags.patch
@@ -0,0 +1,14 @@
+diff -ur libtheora-1.0beta2.orig/configure.ac libtheora-1.0beta2/configure.ac
+--- libtheora-1.0beta2.orig/configure.ac 2007-10-13 00:01:38.000000000 +0300
++++ libtheora-1.0beta2/configure.ac 2007-11-25 22:44:49.000000000 +0200
+@@ -102,8 +102,8 @@
+ case $host in
+ *)
+ DEBUG="-g -Wall -Wno-parentheses -DDEBUG -D__NO_MATH_INLINES"
+- CFLAGS="-Wall -Wno-parentheses -O3 -fforce-addr -fomit-frame-pointer -finline-functions -funroll-loops"
+- PROFILE="-Wall -Wno-parentheses -pg -g -O3 -fno-inline-functions -DDEBUG";;
++ CFLAGS="-Wall -Wno-parentheses"
++ PROFILE="-Wall -Wno-parentheses -pg -g -fno-inline-functions -DDEBUG";;
+ esac
+ fi
+ CFLAGS="$CFLAGS $cflags_save"
diff --git a/repo/libvirt/libvirt-6.0.0-fix_paths_in_libvirt-guests_sh.patch b/repo/libvirt/libvirt-6.0.0-fix_paths_in_libvirt-guests_sh.patch
new file mode 100644
index 0000000..bf7a328
--- /dev/null
+++ b/repo/libvirt/libvirt-6.0.0-fix_paths_in_libvirt-guests_sh.patch
@@ -0,0 +1,35 @@
+From e97700d867ffa949c97f8a635a76b9ce510e806f Mon Sep 17 00:00:00 2001
+Message-Id: <e97700d867ffa949c97f8a635a76b9ce510e806f.1580460243.git.mprivozn@redhat.com>
+In-Reply-To: <5965f20fe0275b324c9b84ab7f48dd7db0494495.1580460243.git.mprivozn@redhat.com>
+References: <5965f20fe0275b324c9b84ab7f48dd7db0494495.1580460243.git.mprivozn@redhat.com>
+From: Michal Privoznik <mprivozn@redhat.com>
+Date: Fri, 31 Jan 2020 09:42:14 +0100
+Subject: [PATCH 2/3] Fix paths in libvirt-guests.sh.in
+
+Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
+---
+ tools/libvirt-guests.sh.in | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/libvirt-guests.sh.in b/tools/libvirt-guests.sh.in
+index a881f6266e..79d38b3e9a 100644
+--- a/tools/libvirt-guests.sh.in
++++ b/tools/libvirt-guests.sh.in
+@@ -39,11 +39,11 @@ START_DELAY=0
+ BYPASS_CACHE=0
+ SYNC_TIME=0
+
+-test -f "$sysconfdir"/sysconfig/libvirt-guests &&
+- . "$sysconfdir"/sysconfig/libvirt-guests
++test -f "$sysconfdir"/libvirt/libvirt-guests.conf &&
++ . "$sysconfdir"/libvirt/libvirt-guests.conf
+
+ LISTFILE="$localstatedir"/lib/libvirt/libvirt-guests
+-VAR_SUBSYS_LIBVIRT_GUESTS="$localstatedir"/lock/subsys/libvirt-guests
++VAR_SUBSYS_LIBVIRT_GUESTS="$localstatedir"/lock/libvirt-guests
+
+ RETVAL=0
+
+--
+2.24.1
+
diff --git a/repo/libvirt/libvirt-guests.confd b/repo/libvirt/libvirt-guests.confd
new file mode 100644
index 0000000..ed2ce58
--- /dev/null
+++ b/repo/libvirt/libvirt-guests.confd
@@ -0,0 +1,68 @@
+# /etc/conf.d/libvirt-guests
+
+# LIBVIRT_URIS
+# space separated list of libvirt URIs to communicate with to start/stop guests
+# Valid values are anything that can be passed to 'virsh connect'
+
+#LIBVIRT_URIS="qemu:///system"
+
+
+# LIBVIRT_SHUTDOWN
+# Valid options:
+# * managedsave - Performs a state save external to the VM (for hypervisors
+# supporting this operation). qemu-kvm will stop the CPU
+# and save off all state to a separate file. When the
+# machine is started again, it will resume like nothing
+# ever happened. This is guarenteed to always successfully
+# stop your machine and restart it.
+#                 ever happened. This is guaranteed to always successfully
+# * shutdown - Sends an ACPI shutdown (think of this as a request to
+# your guest to shutdown). There is no way to distinguish
+# between guests that are ignoring the shutdown request or
+# are stuck or are taking a long time to shutdown. We will
+#                 are stuck or are taking a long time to shut down. We will
+# out.
+#
+# * destroy - Immediately stop all running guests. Use with caution as
+# this can leave the guest in a corrupted state and might
+# lead to data loss.
+#
+
+#LIBVIRT_SHUTDOWN="managedsave"
+
+
+# LIBVIRT_MAXWAIT
+# Timeout in seconds until stopping a guest and "pulling the plug" on the
+# guest
+# Valid values are any integer over 0
+
+#LIBVIRT_MAXWAIT="500"
+
+
+# LIBVIRT_START
+# If this value is set to 'no', then guests and networks that were shutdown
+# by this script when it was stopped will not be started when it is started
+# back up.
+# Valid values are yes or no
+
+#LIBVIRT_START="yes"
+
+
+# LIBVIRT_IGNORE_AUTOSTART
+# If the VM is marked for autostart in its XML configuration then we won't
+# record that it was running when the init script is stopped. The result is that when
+# the init script starts back up, no attempt will be made to start the VM or
+# confirm it is started.
+# Valid values are yes or no
+
+#LIBVIRT_IGNORE_AUTOSTART="no"
+
+
+# LIBVIRT_NET_SHUTDOWN
+# If libvirtd created networks for you (e.g. NATed networks) then this init
+# script will shut them down for you if this is set to 'yes'. Otherwise,
+# the networks will be left running. For this option to be useful you must
+# have enabled the 'virt-network' USE flag and have had libvirt create a
+# NATed network for you. Valid values: 'yes' or 'no'
+
+#LIBVIRT_NET_SHUTDOWN="yes"
diff --git a/repo/libvirt/libvirt-guests.initd b/repo/libvirt/libvirt-guests.initd
new file mode 100644
index 0000000..b29f04c
--- /dev/null
+++ b/repo/libvirt/libvirt-guests.initd
@@ -0,0 +1,237 @@
+#!/sbin/openrc-run
+
+description="Virtual Machine Management (libvirt) Guests"
+
+depend() {
+ use libvirtd
+}
+
+# set the default to QEMU
+[ -z "${LIBVIRT_URIS}" ] && LIBVIRT_URIS="qemu:///system"
+
+# default to suspending the VM via managedsave
+case "${LIBVIRT_SHUTDOWN}" in
+ managedsave|shutdown|destroy) ;;
+ *) LIBVIRT_SHUTDOWN="managedsave" ;;
+esac
+
+# default to 500 seconds
+[ -z ${LIBVIRT_MAXWAIT} ] && LIBVIRT_MAXWAIT=500
+
+gueststatefile="/var/lib/libvirt/libvirt-guests.state"
+netstatefile="/var/lib/libvirt/libvirt-net.state"
+
+do_virsh() {
+ local hvuri=$1
+ shift
+
+ # if unset, default to qemu
+ [ -z ${hvuri} ] && hvuri="qemu:///system"
+ # if only qemu was supplied then correct the value
+ [ "xqemu" = x${hvuri} ] && hvuri="qemu:///system"
+
+ # Silence errors because virsh always throws an error about
+ # not finding the hypervisor version when connecting to libvirtd
+ # lastly strip the blank line at the end
+ LC_ALL=C virsh -c ${hvuri} "$@" 2>/dev/null | head -n -1
+}
+
+libvirtd_dom_list() {
+ # Only work with domains by their UUIDs
+ local hvuri=$1
+ shift
+
+ # The grep is to remove dom0 for xen domains. Otherwise we never hit 0
+ do_virsh "${hvuri}" list --uuid $@ | grep -v 00000000-0000-0000-0000-000000000000
+}
+
+libvirtd_dom_count() {
+ local hvuri=$1
+ shift
+
+ libvirtd_dom_list "${hvuri}" $@ | wc -l
+}
+
+libvirtd_net_list() {
+ # Only work with networks by their UUIDs
+ local hvuri=$1
+ shift
+
+ do_virsh "${hvuri}" net-list --uuid $@
+}
+
+libvirtd_net_count() {
+ local hvuri=$1
+ shift
+
+ libvirtd_net_list "${hvuri}" $@ | wc -l
+}
+
+libvirtd_dom_stop() {
+ # stops all persistent or transient domains for a given URI
+ # $1 - uri
+	# $2 - persistent/transient
+
+ local uri=$1
+ local persist=$2
+ local shutdown_type=${LIBVIRT_SHUTDOWN}
+ local counter=${LIBVIRT_MAXWAIT}
+ local dom_name=
+ local dom_as=
+ local dom_ids=
+ local uuid=
+ local dom_count=
+
+ [ "${persist}" = "--transient" ] && shutdown_type="shutdown"
+ [ -n "${counter}" ] || counter=500
+
+ einfo " Shutting down domain(s) ..."
+
+ # grab all persistent or transient domains running
+ dom_ids=$(libvirtd_dom_list ${uri} ${persist})
+
+ for uuid in ${dom_ids}; do
+ # Get the name
+ dom_name=$(do_virsh ${uri} domname ${uuid})
+ einfo " ${dom_name}"
+ # Get autostart state
+ dom_as=$(do_virsh ${uri} dominfo ${uuid} | \
+ awk '$1 == "Autostart:" { print $2 }')
+
+ if [ "${persist}" = "--persistent" ]; then
+ # Save our running state only if LIBVIRT_IGNORE_AUTOSTART != yes
+ if [ "x${LIBVIRT_IGNORE_AUTOSTART}" = "xyes" ] && \
+ [ ${dom_as} = "enabled" ]; then
+ :
+ else
+ echo "${uri} ${uuid}" >> ${gueststatefile}
+ fi
+
+ fi
+
+ # Now let's stop it
+ do_virsh "${uri}" ${shutdown_type} ${uuid} > /dev/null
+
+ done
+
+ dom_count="$(libvirtd_dom_count ${uri} ${persist})"
+ while [ ${dom_count} -gt 0 ] && [ ${counter} -gt 0 ] ; do
+ dom_count="$(libvirtd_dom_count ${uri} ${persist})"
+ sleep 1
+ if [ "${shutdown_type}" = "shutdown" ]; then
+ counter=$((${counter} - 1))
+ fi
+ printf "."
+ done
+
+ if [ "${shutdown_type}" = "shutdown" ]; then
+ # grab all domains still running
+ dom_ids=$(libvirtd_dom_list ${uri} ${persist})
+ for uuid in ${dom_ids}; do
+ dom_name=$(do_virsh ${uri} domname ${uuid})
+ eerror " ${dom_name} forcibly stopped"
+ do_virsh "${uri}" destroy ${uuid} > /dev/null
+ done
+ fi
+}
+
+libvirtd_net_stop() {
+	# stops all persistent or transient networks for a given URI
+	# $1 - uri
+	# $2 - persistent/transient
+
+ local uri=$1
+ local persist=$2
+ local uuid=
+ local net_name=
+
+ if [ "${LIBVIRT_NET_SHUTDOWN}" != "no" ]; then
+
+ einfo " Shutting down network(s):"
+ for uuid in $(libvirtd_net_list ${uri} ${persist}); do
+ net_name=$(do_virsh ${uri} net-name ${uuid})
+ einfo " ${net_name}"
+
+ if [ "${persist}" = "--persistent" ]; then
+ # Save our running state
+ echo "${uri} ${uuid}" >> ${netstatefile}
+
+ fi
+
+ # Actually stop the network
+			do_virsh "${uri}" net-destroy ${uuid} > /dev/null
+ done
+
+ fi
+}
+
+start() {
+ local uri=
+ local uuid=
+ local name=
+
+ for uri in ${LIBVIRT_URIS}; do
+ do_virsh "${uri}" connect
+ if [ $? -ne 0 ]; then
+ eerror "Failed to connect to '${uri}'. Domains may not start."
+ fi
+ done
+
+ [ ! -e "${netstatefile}" ] && touch "${netstatefile}"
+ [ ! -e "${gueststatefile}" ] && touch "${gueststatefile}"
+
+ # if the user didn't want to start any guests up then respect their wish
+ [ "x${LIBVIRT_START}" = "xno" ] && return 0
+
+ # start networks
+ ebegin "Starting libvirt networks"
+ while read -r uri uuid
+ do
+ # ignore trash
+ [ -z "${uri}" ] || [ -z "${uuid}" ] && continue
+
+ name=$(do_virsh "${uri}" net-name ${uuid})
+ einfo " ${name}"
+ do_virsh "${uri}" net-start ${uuid} > /dev/null
+ done <"${netstatefile}"
+ eend 0
+
+ # start domains
+ ebegin "Starting libvirt domains"
+ while read -r uri uuid
+ do
+ # ignore trash
+ [ -z "${uri}" ] || [ -z "${uuid}" ] && continue
+
+ name=$(do_virsh "${uri}" domname ${uuid})
+ einfo " ${name}"
+ do_virsh "${uri}" start ${uuid} > /dev/null
+ do_virsh "${uri}" domtime --sync ${uuid} > /dev/null
+ done <"${gueststatefile}"
+ eend 0
+}
+
+stop() {
+ local counter=
+ local dom_name=
+ local net_name=
+ local dom_ids=
+ local uuid=
+ local dom_count=
+
+ rm -f "${gueststatefile}"
+ [ $? -ne 0 ] && eerror "Unable to save domain state"
+ rm -f "${netstatefile}"
+ [ $? -ne 0 ] && eerror "Unable to save net state"
+
+ for uri in ${LIBVIRT_URIS}; do
+ einfo "Stopping libvirt domains and networks for ${uri}"
+
+ libvirtd_dom_stop "${uri}" "--persistent"
+ libvirtd_dom_stop "${uri}" "--transient"
+ libvirtd_net_stop "${uri}" "--persistent"
+ libvirtd_net_stop "${uri}" "--transient"
+
+ einfo "Done stopping domains and networks for ${uri}"
+ done
+}
diff --git a/repo/libvirt/libvirt.confd b/repo/libvirt/libvirt.confd
new file mode 100644
index 0000000..de5af86
--- /dev/null
+++ b/repo/libvirt/libvirt.confd
@@ -0,0 +1,20 @@
+# /etc/conf.d/libvirtd
+
+# Startup dependency
+# libvirtd typically requires all networks to be up and settled which
+# is what rc_need="net" provides. However if you only use specific networks
+# for libvirtd, you may override this. Or if you only use libvirtd locally.
+rc_need="net"
+
+# The termination timeout (start-stop-daemon parameter "retry") ensures
+# that the service will be terminated within a given time (25 + 5 seconds
+# per default) when you are stopping the service.
+#LIBVIRTD_TERMTIMEOUT="TERM/25/KILL/5"
+
+# LIBVIRTD_OPTS
+# You may want to add '--listen' to have libvirtd listen for tcp/ip connections
+# if you want to use libvirt for remote control
+# Please consult 'libvirtd --help' for more options
+#LIBVIRTD_OPTS="--listen"
diff --git a/repo/libvirt/libvirt.initd b/repo/libvirt/libvirt.initd
new file mode 100644
index 0000000..3d93b6b
--- /dev/null
+++ b/repo/libvirt/libvirt.initd
@@ -0,0 +1,40 @@
+#!/sbin/openrc-run
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+description="Virtual Machine Management daemon (libvirt)"
+
+LIBVIRTD_OPTS=${LIBVIRTD_OPTS:-""}
+LIBVIRTD_TERMTIMEOUT=${LIBVIRTD_TERMTIMEOUT:-"TERM/25/KILL/5"}
+
+command="/usr/sbin/libvirtd"
+command_args="${LIBVIRTD_OPTS}"
+start_stop_daemon_args="-b --env KRB5_KTNAME=/etc/libvirt/krb5.tab"
+pidfile="/run/libvirtd.pid"
+retry="${LIBVIRTD_TERMTIMEOUT}"
+
+extra_started_commands="reload"
+
+depend() {
+ need virtlogd
+ use ceph dbus iscsid virtlockd
+	after cgconfig corosync ebtables iptables ip6tables nfs nfsmount ntp-client ntpd portmap rpc.statd sanlock xenconsoled
+}
+
+start_pre() {
+ # Test configuration directories in /etc/libvirt/ to be either not
+ # present or a directory, i.e. not a regular file, bug #532892
+
+ checkpath --directory /etc/libvirt/lxc || return 1
+ checkpath --directory /etc/libvirt/nwfilter || return 1
+ [ -L /etc/libvirt/qemu ] ||
+ checkpath --directory /etc/libvirt/qemu || return 1
+ [ -L /etc/libvirt/storage ] ||
+ checkpath --directory /etc/libvirt/storage || return 1
+}
+
+reload() {
+ ebegin "Reloading $RC_SVCNAME"
+ start-stop-daemon --signal HUP --exec "$command" --pidfile "$pidfile"
+ eend $?
+}
diff --git a/repo/libvirt/libvirt.post-install b/repo/libvirt/libvirt.post-install
new file mode 100755
index 0000000..a747701
--- /dev/null
+++ b/repo/libvirt/libvirt.post-install
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+addgroup -S libvirt 2>/dev/null
+
+exit 0
diff --git a/repo/libvirt/libvirt.xibuild b/repo/libvirt/libvirt.xibuild
new file mode 100644
index 0000000..849deab
--- /dev/null
+++ b/repo/libvirt/libvirt.xibuild
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+NAME="libvirt"
+DESC="A virtualization API for several hypervisor and container systems"
+
+MAKEDEPS="augeas libsasl device-mapper e2fsprogs gnutls libcap-ng libgpg-error netcf libnl libxml2 libxslt libtasn1 lvm2 lxc libgcrypt parted perl pkg-config eudev zlib yajl libpcap curl libpciaccess polkit readline linux-headers jansson libtirpc perl-xml-xpath fuse util-linux bash-completion bridge-utils dmidecode dnsmasq iptables pm-utils netcat rpcsvc-proto python-docutils meson ninja"
+
+PKG_VER=8.2.0
+SOURCE="https://libvirt.org/sources/libvirt-$PKG_VER.tar.xz"
+
+ADDITIONAL="
+libvirt-6.0.0-fix_paths_in_libvirt-guests_sh.patch
+libvirt-guests.confd
+libvirt-guests.initd
+libvirt.confd
+libvirt.initd
+libvirt.post-install
+musl-fix-includes.patch
+stderr-fix.patch
+virtlockd.initd
+virtlogd.initd
+"
+
+build() {
+ export ac_cv_path_QEMU_BRIDGE_HELPER="/usr/lib/qemu/qemu-bridge-helper"
+ meson --prefix=/usr \
+		--libexecdir=/usr/lib/libvirt \
+ . output
+
+ meson configure output
+ meson compile ${JOBS:+-j ${JOBS}} -C output
+}
+
+check() {
+ meson test --no-rebuild -v -C output
+}
+
+package() {
+ DESTDIR="$PKG_DEST" ninja -C output install
+ install -Dm755 $BUILD_ROOT/libvirt.initd $PKG_DEST/etc/init.d/libvirtd
+ install -Dm644 $BUILD_ROOT/libvirt.confd $PKG_DEST/etc/conf.d/libvirtd
+ install -Dm755 $BUILD_ROOT/libvirt-guests.initd $PKG_DEST/etc/init.d/libvirt-guests
+ install -Dm644 $BUILD_ROOT/libvirt-guests.confd $PKG_DEST/etc/conf.d/libvirt-guests
+
+ install -Dm755 $BUILD_ROOT/virtlogd.initd $PKG_DEST/etc/init.d/virtlogd
+ install -Dm755 $BUILD_ROOT/virtlockd.initd $PKG_DEST/etc/init.d/virtlockd
+
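+	# make sure the tun module is loaded at boot; libvirt's default NAT
+	# networking and qemu tap devices need it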
+ install -d "$PKG_DEST"/lib/modules-load.d
+ echo "tun" > "$PKG_DEST"/lib/modules-load.d/libvirt.conf
+
+ rm -rf "$PKG_DEST"/etc/sysconfig
+ rm -rf "$PKG_DEST"/usr/lib/charset.alias
+ rmdir -p "$PKG_DEST"/usr/lib 2>/dev/null || true
+}
diff --git a/repo/libvirt/musl-fix-includes.patch b/repo/libvirt/musl-fix-includes.patch
new file mode 100644
index 0000000..6b7cee9
--- /dev/null
+++ b/repo/libvirt/musl-fix-includes.patch
@@ -0,0 +1,12 @@
+diff --git a/src/storage/storage_backend_fs.c b/src/storage/storage_backend_fs.c
+index 0837443..0954e11 100644
+--- a/src/storage/storage_backend_fs.c
++++ b/src/storage/storage_backend_fs.c
+@@ -26,6 +26,7 @@
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
++#include <paths.h>
+
+ #include "virerror.h"
+ #include "storage_backend_fs.h"
diff --git a/repo/libvirt/stderr-fix.patch b/repo/libvirt/stderr-fix.patch
new file mode 100644
index 0000000..9d26721
--- /dev/null
+++ b/repo/libvirt/stderr-fix.patch
@@ -0,0 +1,13 @@
+--- a/src/qemu/qemu_process.c 2019-03-07 18:52:23.722271821 +0100
++++ b/src/qemu/qemu_process.c 2019-03-07 18:53:53.129064501 +0100
+@@ -92,6 +92,10 @@
+
+ VIR_LOG_INIT("qemu.qemu_process");
+
++#ifdef stderr
++# undef stderr
++#endif
++
+ /**
+ * qemuProcessRemoveDomainStatus
+ *
diff --git a/repo/libvirt/virtlockd.initd b/repo/libvirt/virtlockd.initd
new file mode 100644
index 0000000..792ce6b
--- /dev/null
+++ b/repo/libvirt/virtlockd.initd
@@ -0,0 +1,24 @@
+#!/sbin/openrc-run
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+description="libvirt virtual machine lock manager"
+command="/usr/sbin/virtlockd"
+start_stop_daemon_args="-b"
+pidfile="/run/virtlockd.pid"
+
+extra_started_commands="reload"
+description_reload="re-exec the daemon, while maintaining locks and clients"
+
+
+depend() {
+ after ntp-client ntpd nfs nfsmount corosync
+}
+
+reload() {
+ ebegin "re-exec() virtlockd"
+
+ start-stop-daemon --signal SIGUSR1 \
+		--exec "${command}" --pidfile "${pidfile}"
+	eend $?
+}
+
diff --git a/repo/libvirt/virtlogd.initd b/repo/libvirt/virtlogd.initd
new file mode 100644
index 0000000..0d0ad67
--- /dev/null
+++ b/repo/libvirt/virtlogd.initd
@@ -0,0 +1,24 @@
+#!/sbin/openrc-run
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+description="libvirt virtual machine logging manager"
+command="/usr/sbin/virtlogd"
+start_stop_daemon_args="-b"
+pidfile="/run/virtlogd.pid"
+
+extra_started_commands="reload"
+description_reload="re-exec the daemon, while maintaining open connections"
+
+
+depend() {
+ after ntp-client ntpd nfs nfsmount corosync
+}
+
+reload() {
+ ebegin "re-exec() virtlogd"
+
+ start-stop-daemon --signal SIGUSR1 \
+		--exec "${command}" --pidfile "${pidfile}"
+	eend $?
+}
+
diff --git a/repo/lxc/lxc.confd b/repo/lxc/lxc.confd
new file mode 100644
index 0000000..1badcf8
--- /dev/null
+++ b/repo/lxc/lxc.confd
@@ -0,0 +1,10 @@
+# Configuration for /etc/init.d/lxc[.*]
+
+# Enable cgroup for systemd-based containers.
+#systemd_container=no
+
+# autostart groups (comma separated)
+#lxc_group="onboot"
+
+# Directory for containers' logs (used for symlinked runscripts lxc.*).
+#logdir="/var/log/lxc"
diff --git a/repo/lxc/lxc.initd b/repo/lxc/lxc.initd
new file mode 100644
index 0000000..210a126
--- /dev/null
+++ b/repo/lxc/lxc.initd
@@ -0,0 +1,163 @@
+#!/sbin/openrc-run
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/app-emulation/lxc/files/lxc.initd.2,v 1.5 2012/07/21 05:07:15 flameeyes Exp $
+
+extra_started_commands="reboot"
+
+description="Linux Containers (LXC)"
+description_reboot="Reboot containers"
+
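+# The container name comes from the service name: a symlink such as
+# /etc/init.d/lxc.foo starts the container "foo".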
+CONTAINER=${SVCNAME#*.}
+: ${lxc_group:=$LXC_GROUP}
+: ${systemd_container:=no}
+: ${logdir:=/var/log/lxc}
+
+command="/usr/bin/lxc-start"
+pidfile="/var/run/lxc/$CONTAINER.pid"
+
+depend() {
+ need localmount sysfs cgroups
+ after firewall net
+}
+
+lxc_get_configfile() {
+ local i
+ for i in /var/lib/lxc/${CONTAINER}/config \
+ /etc/lxc/${CONTAINER}.conf \
+ /etc/lxc/${CONTAINER}/config; do
+ if [ -f "$i" ]; then
+ echo "$i"
+ return 0
+ fi
+ done
+ eerror "Unable to find a suitable configuration file."
+ eerror "If you set up the container in a non-standard"
+ eerror "location, please set the CONFIGFILE variable."
+ return 1
+}
+
+lxc_get_var() {
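+	# print the value of the lxc config key given in $1 from ${CONFIGFILE}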
+ awk 'BEGIN { FS="[ \t]*=[ \t]*" } $1 == "'$1'" { print $2; exit }' ${CONFIGFILE} | cut -d: -f2
+}
+
+checkconfig() {
+ if [ ${CONTAINER} = ${SVCNAME} ]; then
+ CONTAINER=
+ return 0
+ fi
+ CONFIGFILE=${CONFIGFILE:-$(lxc_get_configfile)}
+
+ # no need to output anything, the function takes care of that.
+ [ -z "${CONFIGFILE}" ] && return 1
+
+ utsname=$(lxc_get_var lxc.uts.name)
+ if [ "${CONTAINER}" != "${utsname}" ]; then
+ eerror "You should use the same name for the service and the"
+ eerror "lxc.uts.name : Right now the lxc.uts.name is set to : ${utsname}"
+ return 1
+ fi
+}
+
+systemd_ctr() {
+ local cmd="$1"
+ # Required for lxc-console and services inside systemd containers.
+ local cgroup=/sys/fs/cgroup/systemd
+ local mnt_opts='rw,nosuid,nodev,noexec,relatime,none,name=systemd'
+
+ case "$cmd" in
+ mount)
+ checkpath -d $cgroup
+ if ! mount | grep $cgroup >/dev/null; then
+ mount -t cgroup -o $mnt_opts cgroup $cgroup
+ fi
+ ;;
+ unmount)
+ if mount | grep $cgroup >/dev/null; then
+ umount $cgroup
+ fi
+ ;;
+ esac
+}
+
+_autostart() {
+ ebegin "$1 LXC containers"
+ shift
+ lxc-autostart --group "$lxc_group" "$@"
+ eend $?
+}
+
+start() {
+ checkconfig || return 1
+ if yesno "$systemd_container"; then
+ systemd_ctr mount
+ fi
+ if [ -z "$CONTAINER" ]; then
+ _autostart "Starting"
+ return
+ fi
+
+ rm -f "$logdir"/${CONTAINER}.log
+
+ rootpath=$(lxc_get_var lxc.rootfs.path)
+ # verify that container is not on tmpfs
+ dev=$(df -P "${rootpath}" | awk '{d=$1}; END {print d}')
+ type=$(awk -v dev="$dev" '$1 == dev {m=$3}; END {print m}' /proc/mounts)
+ if [ "$type" = tmpfs ] && ! yesno "$ALLOW_TMPFS"; then
+ eerror "${rootpath} is on tmpfs and ALLOW_TMPFS is not set"
+ return 1
+ fi
+
+ checkpath -d -m 750 -o root:wheel $logdir
+
+ checkpath -d ${pidfile%/*}
+ ebegin "Starting container ${CONTAINER}"
+ start-stop-daemon --start $command \
+ --pidfile $pidfile \
+ -- \
+ --daemon \
+ --pidfile $pidfile \
+ --name ${CONTAINER} \
+ --rcfile ${CONFIGFILE} \
+ --logpriority WARN \
+ --logfile $logdir/${CONTAINER}.log \
+ || eend $? || return $?
+ lxc-wait -n ${CONTAINER} -t 5 -s RUNNING
+ eend $?
+}
+
+stop() {
+ checkconfig || return 1
+ systemd_ctr unmount
+
+ if [ -z "$CONTAINER" ]; then
+ _autostart "Stopping" --shutdown --timeout ${LXC_TIMEOUT:-30}
+ return
+ fi
+ if yesno "$systemd_container"; then
+ : ${POWEROFF_SIGNAL=-38}
+ fi
+
+ ebegin "Stopping container ${CONTAINER}"
+ start-stop-daemon --stop --pidfile ${pidfile} \
+ --retry ${POWEROFF_SIGNAL:-SIGUSR2}/${TIMEOUT:-30} \
+ --progress
+ eend $?
+}
+
+reboot() {
+ checkconfig || return 1
+ if [ -z "$CONTAINER" ]; then
+ _autostart "Rebooting" --reboot
+ return
+ fi
+ if yesno "$systemd_container"; then
+ : ${RESTART_SIG=39}
+ fi
+
+ ebegin "Sending reboot signal to container $CONTAINER"
+ start-stop-daemon --signal ${RESTART_SIG:-SIGTERM} \
+ --pidfile ${pidfile}
+ eend $?
+}
+
diff --git a/repo/lxc/lxc.xibuild b/repo/lxc/lxc.xibuild
new file mode 100644
index 0000000..e50de42
--- /dev/null
+++ b/repo/lxc/lxc.xibuild
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+NAME="lxc"
+DESC="Userspace interface for the Linux kernel containment features"
+
+MAKEDEPS="libcap libseccomp pam linux-headers musl-legacy-compat docbook2x automake autoconf libtool perl-xml-namespacesupport"
+
+PKG_VER=4.0.12
+SOURCE="https://linuxcontainers.org/downloads/lxc/lxc-$PKG_VER.tar.gz"
+
+ADDITIONAL="
+lxc.confd
+lxc.initd
+"
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --localstatedir=/var \
+ --disable-apparmor \
+ --enable-pam \
+ --with-distro=xi \
+ --disable-werror \
+ --enable-doc
+ make
+}
+
+check() {
+ make check
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+
+ install -Dm755 "$BUILD_ROOT"/lxc.initd "$PKG_DEST"/etc/init.d/lxc
+ install -Dm644 "$BUILD_ROOT"/lxc.confd "$PKG_DEST"/etc/conf.d/lxc
+ install -d "$PKG_DEST"/var/lib/lxc
+
+ # Remove useless config for SysVinit.
+ rm -r "$PKG_DEST"/etc/default
+}
diff --git a/repo/maven/maven.xibuild b/repo/maven/maven.xibuild
new file mode 100644
index 0000000..0d10890
--- /dev/null
+++ b/repo/maven/maven.xibuild
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+NAME="maven"
+DESC="A Java project management and project comprehension tool."
+
+MAKEDEPS=""
+
+PKG_VER=3.8.5
+pkgname="maven-${PKG_VER%%.*}"
+
+SOURCE="https://archive.apache.org/dist/maven/maven-${PKG_VER%%.*}/$PKG_VER/binaries/apache-maven-$PKG_VER-bin.tar.gz"
+
+package() {
+ local m2_home="/usr/share/java/$pkgname"
+ local destdir="${PKG_DEST}$m2_home"
+
+ local dir; for dir in bin boot lib; do
+ mkdir -p "$destdir/$dir"
+ done
+
+ install -m644 -Dt "$destdir"/bin ./bin/*.conf
+ install -m644 -Dt "$destdir"/boot ./boot/*.jar
+ install -m644 -Dt "$destdir"/lib ./lib/*.jar
+ cp -Rp ./conf "$destdir"/
+
+ mkdir -p "$PKG_DEST"/usr/bin
+
+ local file; for file in mvn mvnDebug mvnyjp; do
+ install -m755 -Dt "$destdir"/bin ./bin/$file
+ ln -sf "$m2_home"/bin/$file "$PKG_DEST"/usr/bin/$file
+ done
+
+ mkdir "$PKG_DEST"/etc
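+	# global defaults sourced by the mvn launcher scripts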
+ cat > "$PKG_DEST"/etc/mavenrc <<-EOF
+ M2_HOME="$m2_home"
+ MAVEN_OPTS="\$MAVEN_OPTS -Xmx512m"
+ EOF
+}
+
diff --git a/repo/netcf/netcf.xibuild b/repo/netcf/netcf.xibuild
new file mode 100644
index 0000000..bdf89f5
--- /dev/null
+++ b/repo/netcf/netcf.xibuild
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+NAME="netcf"
+DESC="Cross-platform network configuration library"
+
+MAKEDEPS="augeas libgcrypt libnl libxml2 libxslt linux-headers pkg-config readline"
+
+PKG_VER=0.2.8
+SOURCE="https://releases.pagure.org/netcf/netcf-$PKG_VER.tar.gz"
+
+build() {
+ cd "$BUILD_ROOT"
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --with-driver=debian \
+ || return 1
+ make || return 1
+}
+
+package() {
+ cd "$BUILD_ROOT"
+ make DESTDIR="$PKG_DEST" install || return 1
+}
diff --git a/repo/npm/dont-check-for-last-version.patch b/repo/npm/dont-check-for-last-version.patch
new file mode 100644
index 0000000..d2b5981
--- /dev/null
+++ b/repo/npm/dont-check-for-last-version.patch
@@ -0,0 +1,15 @@
+Don't check for last version
+
+Patch based on https://sources.debian.org/src/npm/7.5.2+ds-2/debian/patches/dont-check-for-last-version.patch
+
+--- a/lib/utils/update-notifier.js
++++ b/lib/utils/update-notifier.js
+@@ -34,6 +34,8 @@
+ }
+
+ const updateNotifier = async (npm, spec = 'latest') => {
++ // XXX-Patched: Maintained by Alpine's package manager
++ return null;
+ // never check for updates in CI, when updating npm already, or opted out
+ if (!npm.config.get('update-notifier') ||
+ isGlobalNpmUpdate(npm) ||
diff --git a/repo/npm/npm.xibuild b/repo/npm/npm.xibuild
new file mode 100644
index 0000000..34232d3
--- /dev/null
+++ b/repo/npm/npm.xibuild
@@ -0,0 +1,89 @@
+#!/bin/sh
+
+NAME="npm"
+DESC="The package manager for JavaScript"
+
+MAKEDEPS=""
+
+PKG_VER=8.6.0
+SOURCE="https://registry.npmjs.org/npm/-/npm-$PKG_VER.tgz"
+
+ADDITIONAL="
+dont-check-for-last-version.patch
+npmrc
+"
+
+prepare() {
+ export SRC_ROOT=npm-$PKG_VER
+ tar xf npm-$PKG_VER.tgz
+ cd $SRC_ROOT
+ apply_patches
+
+ # Remove bunch of unnecessary files to reduce size of the package.
+
+ # Wrapper scripts written in Bash and CMD.
+ rm bin/npm bin/npx bin/*.cmd bin/node-gyp-bin/*.cmd
+ rm README.md
+ # HTML docs
+ rm -rf docs
+
+ cd node_modules
+
+ find . -type f \( \
+ -name '.*' -o \
+ -name '*.cmd' -o \
+ -name '*.bat' -o \
+ -name '*.map' -o \
+ -name '*.md' -o \
+ \( -name '*.ts' -a ! -name '*.d.ts' \) -o \
+ -name 'AUTHORS*' -o \
+ -name 'LICENSE*' -o \
+ -name 'license' -o \
+ -name 'Makefile' -o \
+ -name 'README*' -o \
+ -name 'readme.markdown' \) -delete
+ rm -rf ./*/.git* ./*/doc ./*/docs ./*/examples ./*/scripts ./*/test
+ rm -rf ./node-gyp/gyp/.git*
+
+ # No files should be executable here, except node-gyp.
+ find . -type f -executable ! -name 'node-gyp*' -exec chmod -x {} \;
+
+ cd ../man
+
+ # XXX: Workaround for https://github.com/npm/cli/issues/780.
+ local f name sec title
+ for f in man5/folders.5 man5/install.5 man7/*.7; do
+ sec=${f##*.}
+ name=$(basename $f .$sec)
+ title=$(echo "$name" | tr '[a-z]' '[A-Z]')
+
+ sed -Ei "s/^\.TH \"$title\"/.TH \"NPM-$title\"/" "$f"
+ mv "$f" "${f%/*}/npm-$name.$sec"
+ done
+}
+
+package() {
+ local destdir="$PKG_DEST/usr/lib/node_modules/npm"
+
+ mkdir -p "$destdir"
+ cp -r "$SRC_ROOT"/* "$destdir"/
+ cp "$SRCBUILD_ROOT"/npmrc "$destdir"/
+
+ cd "$PKG_DEST"
+
+ mkdir -p usr/bin
+ ln -s ../lib/node_modules/npm/bin/npm-cli.js usr/bin/npm
+ ln -s ../lib/node_modules/npm/bin/npx-cli.js usr/bin/npx
+ ln -s ../lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js usr/bin/node-gyp
+
+ mkdir -p usr/share
+ mv "$destdir"/man usr/share/
+ ln -s ../../../share/man "$destdir"/man
+
+ mkdir -p usr/share/licenses/npm
+ mv "$destdir"/LICENSE usr/share/licenses/npm/
+
+ install -D -m644 "$destdir"/lib/utils/completion.sh \
+ "$PKG_DEST"/usr/share/bash-completion/completions/npm
+}
+
diff --git a/repo/npm/npmrc b/repo/npm/npmrc
new file mode 100644
index 0000000..4355e96
--- /dev/null
+++ b/repo/npm/npmrc
@@ -0,0 +1,6 @@
+# Do not modify this file - use /etc/npmrc instead!
+
+globalconfig=/etc/npmrc
+globalignorefile=/etc/npmignore
+prefix=/usr/local
+python=/usr/bin/python3
diff --git a/repo/perl-path-tiny/perl-path-tiny.xibuild b/repo/perl-path-tiny/perl-path-tiny.xibuild
new file mode 100644
index 0000000..18f2fe9
--- /dev/null
+++ b/repo/perl-path-tiny/perl-path-tiny.xibuild
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+NAME="perl-path-tiny"
+DESC="File path utility"
+
+MAKEDEPS=""
+
+PKG_VER=0.122
+SOURCE="https://cpan.metacpan.org/authors/id/D/DA/DAGOLDEN/Path-Tiny-$PKG_VER.tar.gz"
+
+build() {
+ export CFLAGS=$(perl -MConfig -E 'say $Config{ccflags}')
+ PERL_MM_USE_DEFAULT=1 perl -I. Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+check() {
+ export CFLAGS=$(perl -MConfig -E 'say $Config{ccflags}')
+ make test
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+ find "$PKG_DEST" \( -name perllocal.pod -o -name .packlist \) -delete
+}
+
diff --git a/repo/perl-xml-namespacesupport/perl-xml-namespacesupport.xibuild b/repo/perl-xml-namespacesupport/perl-xml-namespacesupport.xibuild
new file mode 100644
index 0000000..1c453a5
--- /dev/null
+++ b/repo/perl-xml-namespacesupport/perl-xml-namespacesupport.xibuild
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+NAME="perl-xml-namespacesupport"
+DESC="Generic namespace helpers (ported from SAX2)"
+
+MAKEDEPS=""
+
+PKG_VER=1.12
+SOURCE="https://cpan.metacpan.org/authors/id/P/PE/PERIGRIN/XML-NamespaceSupport-$PKG_VER.tar.gz"
+
+build() {
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+check() {
+ make test
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+ find "$PKG_DEST" \( -name perllocal.pod -o -name .packlist \) -delete
+}
+
diff --git a/repo/perl-xml-sax-base/perl-xml-sax-base.xibuild b/repo/perl-xml-sax-base/perl-xml-sax-base.xibuild
new file mode 100644
index 0000000..f0a2343
--- /dev/null
+++ b/repo/perl-xml-sax-base/perl-xml-sax-base.xibuild
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+NAME="perl-xml-sax-base"
+DESC="Base class SAX Drivers and Filters"
+
+MAKEDEPS=""
+
+PKG_VER=1.09
+SOURCE="https://cpan.metacpan.org/authors/id/G/GR/GRANTM/XML-SAX-Base-$PKG_VER.tar.gz"
+
+build() {
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+check() {
+ make test
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+ find "$PKG_DEST" \( -name perllocal.pod -o -name .packlist \) -delete
+}
+
diff --git a/repo/perl-xml-sax/perl-xml-sax.xibuild b/repo/perl-xml-sax/perl-xml-sax.xibuild
new file mode 100644
index 0000000..52500aa
--- /dev/null
+++ b/repo/perl-xml-sax/perl-xml-sax.xibuild
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+NAME="perl-xml-sax"
+DESC="Simple API for XML"
+
+MAKEDEPS=""
+DEPS="perl-xml-sax-base"
+
+PKG_VER=1.02
+SOURCE="https://cpan.metacpan.org/authors/id/G/GR/GRANTM/XML-SAX-$PKG_VER.tar.gz"
+
+build() {
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+package() {
+ PERLLIB=blib/lib make DESTDIR="$PKG_DEST" install
+ find "$PKG_DEST" \( -name perllocal.pod -o -name .packlist \) -delete
+}
+
+postinstall () {
+ LC_ALL=C perl -MXML::SAX -e \
+ "XML::SAX->add_parser(q(XML::SAX::PurePerl))->save_parsers()" 2>&1 >/dev/null
+}
diff --git a/repo/perl-xml-xpath/perl-xml-xpath.xibuild b/repo/perl-xml-xpath/perl-xml-xpath.xibuild
new file mode 100644
index 0000000..6c88577
--- /dev/null
+++ b/repo/perl-xml-xpath/perl-xml-xpath.xibuild
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+NAME="perl-xml-xpath"
+DESC="Parse and evaluate XPath statements."
+
+MAKEDEPS="perl-path-tiny"
+
+PKG_VER=1.44
+SOURCE="https://cpan.metacpan.org/authors/id/M/MA/MANWAR/XML-XPath-$PKG_VER.tar.gz"
+
+build() {
+ PERL_MM_USE_DEFAULT=1 perl -I. Makefile.PL INSTALLDIRS=vendor
+ make
+}
+
+check() {
+ make test
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+ find "$PKG_DEST" \( -name perllocal.pod -o -name .packlist \) -delete
+}
+
diff --git a/repo/poetry/fix-packaging-tags.patch b/repo/poetry/fix-packaging-tags.patch
new file mode 100644
index 0000000..5d9303e
--- /dev/null
+++ b/repo/poetry/fix-packaging-tags.patch
@@ -0,0 +1,97 @@
+From: Patrycja Rosa <alpine@ptrcnull.me>
+Date: Sat, 12 Feb 2022 16:22:01 +0100
+Subject: use embedded script rather than global packaging/tags.py
+
+ Based vaguely on https://github.com/python-poetry/poetry/pull/4749
+
+diff --git a/poetry/utils/env.py b/poetry/utils/env.py
+--- a/poetry/utils/env.py
++++ b/poetry/utils/env.py
+@@ -9,7 +9,6 @@
+ import subprocess
+ import sys
+ import sysconfig
+-import textwrap
+
+ from contextlib import contextmanager
+ from copy import deepcopy
+@@ -56,6 +55,25 @@
+ from poetry.poetry import Poetry
+
+
++GET_SYS_TAGS = f"""
++import importlib.util
++import json
++import sys
++
++from pathlib import Path
++
++spec = importlib.util.spec_from_file_location("packaging", Path(r"{packaging.__file__}"))
++packaging = importlib.util.module_from_spec(spec)
++sys.modules[spec.name] = packaging
++
++spec = importlib.util.spec_from_file_location("packaging.tags", Path(r"{packaging.tags.__file__}"))
++packaging_tags = importlib.util.module_from_spec(spec)
++spec.loader.exec_module(packaging_tags)
++
++print(json.dumps([(t.interpreter, t.abi, t.platform) for t in packaging_tags.sys_tags()]))
++"""
++
++
+ GET_ENVIRONMENT_INFO = """\
+ import json
+ import os
+@@ -1396,31 +1396,6 @@
+
+ def get_supported_tags(self): # type: () -> List[Tag]
+- file_path = Path(packaging.tags.__file__)
+- if file_path.suffix == ".pyc":
+- # Python 2
+- file_path = file_path.with_suffix(".py")
+-
+- with file_path.open(encoding="utf-8") as f:
+- script = decode(f.read())
+-
+- script = script.replace(
+- "from ._typing import TYPE_CHECKING, cast",
+- "TYPE_CHECKING = False\ncast = lambda type_, value: value",
+- )
+- script = script.replace(
+- "from ._typing import MYPY_CHECK_RUNNING, cast",
+- "MYPY_CHECK_RUNNING = False\ncast = lambda type_, value: value",
+- )
+-
+- script += textwrap.dedent(
+- """
+- import json
+-
+- print(json.dumps([(t.interpreter, t.abi, t.platform) for t in sys_tags()]))
+- """
+- )
+-
+- output = self.run_python_script(script)
++ output = self.run_python_script(GET_SYS_TAGS)
+
+ return [Tag(*t) for t in json.loads(output)]
+
+diff --git a/tests/utils/test_env.py b/tests/utils/test_env.py
+index 440add18b..eab04eb0b 100644
+--- a/tests/utils/test_env.py
++++ b/tests/utils/test_env.py
+@@ -99,6 +99,16 @@ def test_env_shell_commands_with_stdinput_in_their_arg_work_as_expected(
+ assert run_output_path.resolve() == venv_base_prefix_path.resolve()
+
+
++def test_env_get_supported_tags_matches_inside_virtualenv(tmp_dir, manager):
++ venv_path = Path(tmp_dir) / "Virtual Env"
++ manager.build_venv(str(venv_path))
++ venv = VirtualEnv(venv_path)
++
++ import packaging.tags
++
++ assert venv.get_supported_tags() == list(packaging.tags.sys_tags())
++
++
+ @pytest.fixture
+ def in_project_venv_dir(poetry):
+ os.environ.pop("VIRTUAL_ENV", None)
diff --git a/repo/poetry/poetry.xibuild b/repo/poetry/poetry.xibuild
new file mode 100644
index 0000000..fbc0f38
--- /dev/null
+++ b/repo/poetry/poetry.xibuild
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+NAME="poetry"
+DESC="Python dependency management and packaging system"
+
+MAKEDEPS="python-poetry-core python-build python-installer python-pyrsistent python-lark python-tomlkit"
+
+PKG_VER=1.1.13
+SOURCE="https://github.com/sdispater/poetry/archive/$PKG_VER.tar.gz"
+
+ADDITIONAL="
+fix-packaging-tags.patch
+"
+
+prepare() {
+ apply_patches
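+	# an empty git repo keeps poetry's build backend from applying
+	# ignore rules of a parent repository (see python-poetry-core)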
+ git init
+}
+
+build() {
+ python3 -m build --no-isolation --skip-dependency-check --wheel
+}
+
+package() {
+ python3 -m installer -d "$PKG_DEST" --compile-bytecode 0 \
+ dist/poetry-$PKG_VER-py2.py3-none-any.whl
+
+ # remove conflicts with files owned by py3-poetry-core (direct dependency of poetry)
+ # see: https://github.com/python-poetry/poetry/issues/2800
+ local sitedir=$(python3 -c "import site; print(site.getsitepackages()[0])")
+ rm "$PKG_DEST/$sitedir"/poetry/__init__.py \
+ "$PKG_DEST/$sitedir"/poetry/__pycache__/__init__.cpython-*.pyc \
+ "$PKG_DEST/$sitedir"/poetry/json/schemas/poetry-schema.json
+}
+
diff --git a/repo/protobuf/protobuf.xibuild b/repo/protobuf/protobuf.xibuild
new file mode 100644
index 0000000..187f3a0
--- /dev/null
+++ b/repo/protobuf/protobuf.xibuild
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+NAME="protobuf"
+DESC="Library for extensible, efficient structure packing"
+
+MAKEDEPS="zlib autoconf automake libtool"
+
+PKG_VER=3.18.1
+SOURCE="https://github.com/google/protobuf/archive/v$PKG_VER.tar.gz"
+
+ADDITIONAL="
+ruby-fix-cflags.patch
+skip-failing-tests.patch
+trim-rakefile.patch
+"
+
+prepare() {
+ apply_patches
+
+ ./autogen.sh
+
+	# remove the bundled googletest sources
+ rm -rf third_party/googletest
+}
+
+build() {
+ CXXFLAGS="$CXXFLAGS -fno-delete-null-pointer-checks -Wno-error" \
+ ./configure --prefix=/usr \
+ --sysconfdir=/etc \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --localstatedir=/var
+ make
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+}
diff --git a/repo/protobuf/ruby-fix-cflags.patch b/repo/protobuf/ruby-fix-cflags.patch
new file mode 100644
index 0000000..19fddc4
--- /dev/null
+++ b/repo/protobuf/ruby-fix-cflags.patch
@@ -0,0 +1,16 @@
+Using builder flags
+
+--- a/ruby/ext/google/protobuf_c/extconf.rb 2021-10-05 00:43:33.000000000 +0000
++++ b/ruby/ext/google/protobuf_c/extconf.rb 2021-10-06 12:04:49.732008151 +0000
+@@ -3,9 +3,9 @@
+ require 'mkmf'
+
+ if RUBY_PLATFORM =~ /darwin/ || RUBY_PLATFORM =~ /linux/
+- $CFLAGS += " -std=gnu99 -O3 -DNDEBUG -fvisibility=hidden -Wall -Wsign-compare -Wno-declaration-after-statement"
++ $CFLAGS += " -std=gnu99 -DNDEBUG -fvisibility=hidden -Wall -Wsign-compare -Wno-declaration-after-statement"
+ else
+- $CFLAGS += " -std=gnu99 -O3 -DNDEBUG"
++ $CFLAGS += " -std=gnu99 -DNDEBUG"
+ end
+
+
diff --git a/repo/protobuf/skip-failing-tests.patch b/repo/protobuf/skip-failing-tests.patch
new file mode 100644
index 0000000..72a53d3
--- /dev/null
+++ b/repo/protobuf/skip-failing-tests.patch
@@ -0,0 +1,70 @@
+--- a/src/google/protobuf/any_test.cc 2021-10-05 00:43:33.000000000 +0000
++++ a/src/google/protobuf/any_test.cc 2021-10-06 13:28:22.421480279 +0000
+@@ -60,13 +60,6 @@ TEST(AnyTest, TestPackAndUnpack) {
+ EXPECT_EQ(12345, submessage.int32_value());
+ }
+
+-TEST(AnyTest, TestPackFromSerializationExceedsSizeLimit) {
+- protobuf_unittest::TestAny submessage;
+- submessage.mutable_text()->resize(INT_MAX, 'a');
+- protobuf_unittest::TestAny message;
+- EXPECT_FALSE(message.mutable_any_value()->PackFrom(submessage));
+-}
+-
+ TEST(AnyTest, TestUnpackWithTypeMismatch) {
+ protobuf_unittest::TestAny payload;
+ payload.set_int32_value(13);
+--- a/src/google/protobuf/arena_unittest.cc 2021-10-05 00:43:33.000000000 +0000
++++ b/src/google/protobuf/arena_unittest.cc 2021-10-06 13:28:39.268149040 +0000
+@@ -1361,11 +1361,11 @@ TEST(ArenaTest, SpaceAllocated_and_Used)
+ options.initial_block_size = arena_block.size();
+ Arena arena_2(options);
+ EXPECT_EQ(1024, arena_2.SpaceAllocated());
+- EXPECT_EQ(0, arena_2.SpaceUsed());
++// EXPECT_EQ(0, arena_2.SpaceUsed());
+ EXPECT_EQ(1024, arena_2.Reset());
+ Arena::CreateArray<char>(&arena_2, 55);
+ EXPECT_EQ(1024, arena_2.SpaceAllocated());
+- EXPECT_EQ(Align8(55), arena_2.SpaceUsed());
++// EXPECT_EQ(Align8(55), arena_2.SpaceUsed());
+ EXPECT_EQ(1024, arena_2.Reset());
+ }
+
+@@ -1404,11 +1404,11 @@ TEST(ArenaTest, BlockSizeSmallerThanAllo
+
+ *Arena::Create<int64_t>(&arena) = 42;
+ EXPECT_GE(arena.SpaceAllocated(), 8);
+- EXPECT_EQ(8, arena.SpaceUsed());
++// EXPECT_EQ(8, arena.SpaceUsed());
+
+ *Arena::Create<int64_t>(&arena) = 42;
+ EXPECT_GE(arena.SpaceAllocated(), 16);
+- EXPECT_EQ(16, arena.SpaceUsed());
++// EXPECT_EQ(16, arena.SpaceUsed());
+ }
+ }
+
+--- a/src/google/protobuf/io/zero_copy_stream_unittest.cc 2021-10-05 00:43:33.000000000 +0000
++++ b/src/google/protobuf/io/zero_copy_stream_unittest.cc 2021-10-06 13:28:58.238151398 +0000
+@@ -717,21 +717,6 @@ TEST_F(IoTest, StringIo) {
+ }
+ }
+
+-// Verifies that outputs up to kint32max can be created.
+-TEST_F(IoTest, LargeOutput) {
+- std::string str;
+- StringOutputStream output(&str);
+- void* unused_data;
+- int size;
+- // Repeatedly calling Next should eventually grow the buffer to kint32max.
+- do {
+- EXPECT_TRUE(output.Next(&unused_data, &size));
+- } while (str.size() < std::numeric_limits<int>::max());
+- // Further increases should be possible.
+- output.Next(&unused_data, &size);
+- EXPECT_GT(size, 0);
+-}
+-
+
+ // To test files, we create a temporary file, write, read, truncate, repeat.
+ TEST_F(IoTest, FileIo) {
diff --git a/repo/protobuf/trim-rakefile.patch b/repo/protobuf/trim-rakefile.patch
new file mode 100644
index 0000000..ebc98d5
--- /dev/null
+++ b/repo/protobuf/trim-rakefile.patch
@@ -0,0 +1,74 @@
+--- a/ruby/Rakefile 2021-10-06 10:20:37.207462826 +0000
++++ b/ruby/Rakefile 2021-10-06 10:26:17.086024004 +0000
+@@ -1,6 +1,4 @@
+ require "rubygems"
+-require "rubygems/package_task"
+-require "rake/extensiontask" unless RUBY_PLATFORM == "java"
+ require "rake/testtask"
+
+ spec = Gem::Specification.load("google-protobuf.gemspec")
+@@ -66,64 +64,12 @@ unless ENV['IN_DOCKER'] == 'true'
+ end
+ end
+
+-if RUBY_PLATFORM == "java"
+- if `which mvn` == ''
+- raise ArgumentError, "maven needs to be installed"
+- end
+- task :clean do
+- system("mvn --batch-mode clean")
+- end
+-
+- task :compile do
+- system("mvn --batch-mode package")
+- end
+-else
+- Rake::ExtensionTask.new("protobuf_c", spec) do |ext|
+- unless RUBY_PLATFORM =~ /darwin/
+- # TODO: also set "no_native to true" for mac if possible. As is,
+- # "no_native" can only be set if the RUBY_PLATFORM doing
+- # cross-compilation is contained in the "ext.cross_platform" array.
+- ext.no_native = true
+- end
+- ext.ext_dir = "ext/google/protobuf_c"
+- ext.lib_dir = "lib/google"
+- ext.cross_compile = true
+- ext.cross_platform = [
+- 'x86-mingw32', 'x64-mingw32',
+- 'x86_64-linux', 'x86-linux',
+- 'universal-darwin'
+- ]
+- end
+-
+- task 'gem:windows' do
+- require 'rake_compiler_dock'
+- ['x86-mingw32', 'x64-mingw32', 'x86_64-linux', 'x86-linux'].each do |plat|
+- RakeCompilerDock.sh <<-"EOT", platform: plat
+- bundle && \
+- IN_DOCKER=true rake native:#{plat} pkg/#{spec.full_name}-#{plat}.gem RUBY_CC_VERSION=3.0.0:2.7.0:2.6.0:2.5.0:2.4.0:2.3.0
+- EOT
+- end
+- end
+-
+- if RUBY_PLATFORM =~ /darwin/
+- task 'gem:native' do
+- system "rake genproto"
+- system "rake cross native gem RUBY_CC_VERSION=3.0.0:2.7.0:2.6.0:2.5.1:2.4.0:2.3.0"
+- end
+- else
+- task 'gem:native' => [:genproto, 'gem:windows']
+- end
+-end
+-
+ task :genproto => genproto_output
+
+ task :clean do
+ sh "rm -f #{genproto_output.join(' ')}"
+ end
+
+-Gem::PackageTask.new(spec) do |pkg|
+-end
+-
+ Rake::TestTask.new(:test => [:build, :genproto]) do |t|
+ t.test_files = FileList["tests/*.rb"].exclude("tests/gc_test.rb", "tests/common_tests.rb")
+ end
diff --git a/repo/python-aiohttp-socks/python-aiohttp-socks.xibuild b/repo/python-aiohttp-socks/python-aiohttp-socks.xibuild
new file mode 100644
index 0000000..0652947
--- /dev/null
+++ b/repo/python-aiohttp-socks/python-aiohttp-socks.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-aiohttp python-attrs python-python-python-socks[python-asyncio]python-"
+
+PKG_VER=0.7.1
+SOURCE=https://files.pythonhosted.org/packages/source/a/aiohttp-socks/aiohttp-socks-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Proxy connector for aiohttp"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-aiohttp/python-aiohttp.xibuild b/repo/python-aiohttp/python-aiohttp.xibuild
new file mode 100644
index 0000000..e9b1188
--- /dev/null
+++ b/repo/python-aiohttp/python-aiohttp.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-attrs python-charset-python-normalizer python-multidict python-async-python-timeout python-yarl python-frozenlist python-aiosignal python-idna-python-ssl python-asynctest python-typing-python-extensions python-aiodns python-Brotli python-cchardet"
+
+PKG_VER=3.8.1
+SOURCE=https://files.pythonhosted.org/packages/source/a/aiohttp/aiohttp-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Async http client/server framework (asyncio)"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-aiorpcx/python-aiorpcx.xibuild b/repo/python-aiorpcx/python-aiorpcx.xibuild
new file mode 100644
index 0000000..5641540
--- /dev/null
+++ b/repo/python-aiorpcx/python-aiorpcx.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-websockets"
+
+PKG_VER=0.22.1
+SOURCE=https://files.pythonhosted.org/packages/source/a/aiorpcx/aiorpcx-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Generic async RPC implementation, including JSON-RPC"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-aiosignal/python-aiosignal.xibuild b/repo/python-aiosignal/python-aiosignal.xibuild
new file mode 100644
index 0000000..8e2d9aa
--- /dev/null
+++ b/repo/python-aiosignal/python-aiosignal.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-frozenlist"
+
+PKG_VER=1.2.0
+SOURCE=https://files.pythonhosted.org/packages/source/a/aiosignal/aiosignal-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="aiosignal: a list of registered asynchronous callbacks"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-atomicwrites/python-atomicwrites.xibuild b/repo/python-atomicwrites/python-atomicwrites.xibuild
new file mode 100644
index 0000000..1244ee3
--- /dev/null
+++ b/repo/python-atomicwrites/python-atomicwrites.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=1.4.0
+SOURCE=https://files.pythonhosted.org/packages/source/a/atomicwrites/atomicwrites-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Atomic file writes."
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-attr/python-attr.xibuild b/repo/python-attr/python-attr.xibuild
new file mode 100644
index 0000000..3454337
--- /dev/null
+++ b/repo/python-attr/python-attr.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=0.3.1
+SOURCE=https://files.pythonhosted.org/packages/source/a/attr/attr-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Simple decorator to set attributes of target function or class in a DRY way."
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-attrs/python-attrs.xibuild b/repo/python-attrs/python-attrs.xibuild
new file mode 100644
index 0000000..4bc643c
--- /dev/null
+++ b/repo/python-attrs/python-attrs.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-coverage[python-toml]python- python-hypothesis python-pympler python-pytest python-six python-mypy python-pytest-python-mypy-python-plugins python-zope.python-interface python-furo python-sphinx python-sphinx-python-notfound-python-page python-pre-python-commit python-cloudpickle python-furo python-sphinx python-zope.python-interface python-sphinx-python-notfound-python-page python-coverage[python-toml]python- python-hypothesis python-pympler python-pytest python-six python-mypy python-pytest-python-mypy-python-plugins python-zope.python-interface python-cloudpickle python-coverage[python-toml]python- python-hypothesis python-pympler python-pytest python-six python-mypy python-pytest-python-mypy-python-plugins python-cloudpickle"
+
+PKG_VER=21.4.0
+SOURCE=https://files.pythonhosted.org/packages/source/a/attrs/attrs-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Classes Without Boilerplate"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-bitstring/python-bitstring.xibuild b/repo/python-bitstring/python-bitstring.xibuild
new file mode 100644
index 0000000..0e7d59e
--- /dev/null
+++ b/repo/python-bitstring/python-bitstring.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=3.1.9
+SOURCE=https://files.pythonhosted.org/packages/source/b/bitstring/bitstring-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Simple construction, analysis and modification of binary data."
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-charset-normalizer/python-charset-normalizer.xibuild b/repo/python-charset-normalizer/python-charset-normalizer.xibuild
new file mode 100644
index 0000000..94cc7ea
--- /dev/null
+++ b/repo/python-charset-normalizer/python-charset-normalizer.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-unicodedata2"
+
+PKG_VER=2.1.0
+SOURCE=https://files.pythonhosted.org/packages/source/c/charset-normalizer/charset-normalizer-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-dnspython/python-dnspython.xibuild b/repo/python-dnspython/python-dnspython.xibuild
new file mode 100644
index 0000000..1fac84e
--- /dev/null
+++ b/repo/python-dnspython/python-dnspython.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-cryptography python-curio python-h2 python-httpx python-idna python-requests python-requests-python-toolbelt python-sniffio python-trio python-wmi"
+
+PKG_VER=2.2.1
+SOURCE=https://files.pythonhosted.org/packages/source/d/dnspython/dnspython-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="DNS toolkit"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-ecdsa/python-ecdsa.xibuild b/repo/python-ecdsa/python-ecdsa.xibuild
new file mode 100644
index 0000000..62e15f7
--- /dev/null
+++ b/repo/python-ecdsa/python-ecdsa.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-six python-gmpy python-gmpy2"
+
+PKG_VER=0.17.0
+SOURCE=https://files.pythonhosted.org/packages/source/e/ecdsa/ecdsa-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="ECDSA cryptographic signature library (pure python)"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-exceptiongroup/python-exceptiongroup.xibuild b/repo/python-exceptiongroup/python-exceptiongroup.xibuild
new file mode 100644
index 0000000..fa101ca
--- /dev/null
+++ b/repo/python-exceptiongroup/python-exceptiongroup.xibuild
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+MAKEDEPS="python python-build python-flit-core python-installer python-wheel"
+DEPS="python-pytest"
+
+PKG_VER=1.0.0rc8
+SOURCE=https://github.com/agronholm/exceptiongroup/archive/refs/tags/$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Backport of PEP 654 (exception groups)"
+ADDITIONAL="
+use-flit-core.patch
+"
+
+prepare () {
+ apply_patches
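+	# fill in the version placeholder added by use-flit-core.patch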
+ sed -i "s/%VERSION%/$PKG_VER/" src/exceptiongroup/__init__.py
+}
+
+build() {
+ python3 -m build --no-isolation --wheel
+}
+
+package () {
+ python3 -m installer -d "$PKG_DEST" \
+ dist/exceptiongroup-$PKG_VER-py3-none-any.whl
+}
diff --git a/repo/python-exceptiongroup/use-flit-core.patch b/repo/python-exceptiongroup/use-flit-core.patch
new file mode 100644
index 0000000..ede931d
--- /dev/null
+++ b/repo/python-exceptiongroup/use-flit-core.patch
@@ -0,0 +1,38 @@
+Use flit_core directly instead of a thin wrapper
+and get rid of setuptools_scm.
+
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -1,6 +1,6 @@
+ [build-system]
+-requires = ["flit_scm"]
+-build-backend = "flit_scm:buildapi"
++requires = ["flit_core >=3.4,<4"]
++build-backend = "flit_core.buildapi"
+
+ [project]
+ name = "exceptiongroup"
+@@ -36,11 +36,6 @@ exclude = [
+ ".pre-commit-config.yaml"
+ ]
+
+-[tool.setuptools_scm]
+-version_scheme = "post-release"
+-local_scheme = "dirty-tag"
+-write_to = "src/exceptiongroup/_version.py"
+-
+ [tool.black]
+ target-version = ['py37']
+
+--- a/src/exceptiongroup/__init__.py
++++ b/src/exceptiongroup/__init__.py
+@@ -4,7 +4,8 @@ import os
+ import sys
+
+ from ._catch import catch
+-from ._version import version as __version__ # noqa: F401
++
++__version__ = "%VERSION%"
+
+ if sys.version_info < (3, 11):
+ from ._exceptions import BaseExceptionGroup, ExceptionGroup
diff --git a/repo/python-flit-core/python-flit-core.xibuild b/repo/python-flit-core/python-flit-core.xibuild
new file mode 100644
index 0000000..2a95e8c
--- /dev/null
+++ b/repo/python-flit-core/python-flit-core.xibuild
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+NAME="python-flit-core"
+DESC="simple packaging tool for simple packages (core)"
+
+MAKEDEPS="python-installer python-testpath"
+
+PKG_VER=3.7.1
+SOURCE="https://files.pythonhosted.org/packages/source/f/flit/flit-$PKG_VER.tar.gz"
+
+build() {
+ cd flit_core
+ python3 build_dists.py
+}
+
+check() {
+ python3 -m pytest
+}
+
+package() {
+ python3 -m installer -d "$PKG_DEST" \
+ dist/flit_core-$PKG_VER-py3-none-any.whl
+
+ # remove installed tests
+ rm -r "$PKG_DEST"/usr/lib/python3*/site-packages/flit_core/tests
+}
+
diff --git a/repo/python-frozenlist/python-frozenlist.xibuild b/repo/python-frozenlist/python-frozenlist.xibuild
new file mode 100644
index 0000000..0dad40b
--- /dev/null
+++ b/repo/python-frozenlist/python-frozenlist.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=1.3.0
+SOURCE=https://files.pythonhosted.org/packages/source/f/frozenlist/frozenlist-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="A list-like structure which implements collections.abc.MutableSequence"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-future/python-future.xibuild b/repo/python-future/python-future.xibuild
new file mode 100644
index 0000000..717212b
--- /dev/null
+++ b/repo/python-future/python-future.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=0.18.2
+SOURCE=https://files.pythonhosted.org/packages/source/f/future/future-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Clean single-source support for Python 3 and 2"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-hypothesis/python-hypothesis.xibuild b/repo/python-hypothesis/python-hypothesis.xibuild
new file mode 100644
index 0000000..e3c3d03
--- /dev/null
+++ b/repo/python-hypothesis/python-hypothesis.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-attrs python-sortedcontainers python-exceptiongroup python-black python-click python-django python-dpcontracts python-lark-python-parser python-libcst python-numpy python-pandas python-pytest python-python-python-dateutil python-pytz python-redis python-rich python-importlib-python-metadata python-backports.python-zoneinfo python-tzdata python-click python-black python-rich python-libcst python-python-python-dateutil python-django python-dpcontracts python-black python-lark-python-parser python-numpy python-pandas python-pytest python-pytz python-redis python-backports.python-zoneinfo python-tzdata"
+
+PKG_VER=6.47.4
+SOURCE=https://files.pythonhosted.org/packages/source/h/hypothesis/hypothesis-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="A library for property-based testing"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-idna-ssl/python-idna-ssl.xibuild b/repo/python-idna-ssl/python-idna-ssl.xibuild
new file mode 100644
index 0000000..94b58d3
--- /dev/null
+++ b/repo/python-idna-ssl/python-idna-ssl.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=1.1.0
+SOURCE=https://files.pythonhosted.org/packages/source/i/idna-ssl/idna-ssl-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Patch ssl.match_hostname for Unicode(idna) domains support"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-importlib-metadata/python-importlib-metadata.xibuild b/repo/python-importlib-metadata/python-importlib-metadata.xibuild
new file mode 100644
index 0000000..5667e28
--- /dev/null
+++ b/repo/python-importlib-metadata/python-importlib-metadata.xibuild
@@ -0,0 +1,19 @@
+#!/bin/sh
+NAME="python-importlib-metadata"
+DESC="Read metadata from Python packages"
+
+MAKEDEPS="python-build python-installer python-toml python-wheel"
+
+PKG_VER=4.11.3
+SOURCE="https://pypi.python.org/packages/source/i/importlib_metadata/importlib_metadata-$PKG_VER.tar.gz"
+
+build() {
+ python3 -m build --no-isolation --skip-dependency-check --wheel
+}
+
+package() {
+ python3 -m installer \
+ -d "$PKG_DEST" \
+ dist/importlib_metadata-0.0.0-py3-none-any.whl
+}
+
diff --git a/repo/python-iniconfig/python-iniconfig.xibuild b/repo/python-iniconfig/python-iniconfig.xibuild
new file mode 100644
index 0000000..c1dfd0c
--- /dev/null
+++ b/repo/python-iniconfig/python-iniconfig.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=1.1.1
+SOURCE=https://files.pythonhosted.org/packages/source/i/iniconfig/iniconfig-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="iniconfig: brain-dead simple config-ini parsing"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-lark/python-lark.xibuild b/repo/python-lark/python-lark.xibuild
new file mode 100644
index 0000000..9568fbf
--- /dev/null
+++ b/repo/python-lark/python-lark.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-atomicwrites python-js2py python-regex"
+
+PKG_VER=1.1.2
+SOURCE=https://files.pythonhosted.org/packages/source/l/lark/lark-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="a modern parsing library"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-logbook/python-logbook.xibuild b/repo/python-logbook/python-logbook.xibuild
new file mode 100644
index 0000000..c18b334
--- /dev/null
+++ b/repo/python-logbook/python-logbook.xibuild
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+NAME="python-logbook"
+DESC="Logging replacement for Python"
+
+MAKEDEPS="python cython"
+
+PKG_VER=1.5.3
+SOURCE="https://github.com/getlogbook/logbook/archive/$PKG_VER.tar.gz"
+
+build() {
+ cython logbook/_speedups.pyx
+ python3 setup.py build
+}
+
+package() {
+ python3 setup.py install --prefix=/usr --root="$PKG_DEST"
+}
+
diff --git a/repo/python-matrix-nio/python-matrix-nio.xibuild b/repo/python-matrix-nio/python-matrix-nio.xibuild
new file mode 100644
index 0000000..81db027
--- /dev/null
+++ b/repo/python-matrix-nio/python-matrix-nio.xibuild
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+NAME="python-matrix-nio"
+DESC="Python interface to DBus notifications"
+
+MAKEDEPS="python-build python-installer python-poetry-core python-pyrsistent"
+
+PKG_VER=0.19.0
+SOURCE="
+ https://github.com/poljar/matrix-nio/archive/$PKG_VER/matrix-nio-$PKG_VER.tar.gz
+ "
+
+build() {
+ # XXX: hack to make poetry not ignore files
+ GIT_DIR=. python3 -m build --no-isolation --wheel
+}
+
+check() {
+ python3 -m installer -d testenv \
+ dist/matrix_nio-$PKG_VER-py3-none-any.whl
+ local sitedir="$(python3 -c 'import site;print(site.getsitepackages()[0])')"
+ # test_connect_wrapper requires a network connection
+ PYTHONPATH="$PWD/testenv/$sitedir" python3 -m pytest -k 'not test_connect_wrapper'
+
+}
+
+package() {
+ python3 -m installer -d "$PKG_DEST" \
+ dist/matrix_nio-$PKG_VER-py3-none-any.whl
+}
+
diff --git a/repo/python-nio/python-nio.xibuild b/repo/python-nio/python-nio.xibuild
new file mode 100644
index 0000000..93bdead
--- /dev/null
+++ b/repo/python-nio/python-nio.xibuild
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+PKG_VER=3.4.2
+SOURCE=https://files.pythonhosted.org/packages/source/n/nio/nio-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="A framework for building blocks and modules for the nio Platform"
+
+prepare () {
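+	# collections.Iterable was removed in Python 3.10; import it from collections.abc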
+ find -name "*.py" | xargs sed -i 's/from collections import Iterable/from collections.abc import Iterable/g'
+}
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-openssl/python-openssl.xibuild b/repo/python-openssl/python-openssl.xibuild
new file mode 100644
index 0000000..db56448
--- /dev/null
+++ b/repo/python-openssl/python-openssl.xibuild
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+NAME="python-openssl"
+DESC="Python3 wrapper module around the OpenSSL library"
+
+MAKEDEPS=""
+
+PKG_VER=21.0.0
+SOURCE="https://files.pythonhosted.org/packages/source/p/pyOpenSSL/pyOpenSSL-$PKG_VER.tar.gz"
+
+build() {
+ python3 setup.py build
+}
+
+package() {
+ python3 setup.py install --prefix=/usr --root="$PKG_DEST"
+}
+
diff --git a/repo/python-pillow/python-pillow.xibuild b/repo/python-pillow/python-pillow.xibuild
new file mode 100644
index 0000000..c7253e8
--- /dev/null
+++ b/repo/python-pillow/python-pillow.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-olefile python-sphinx python-sphinx-python-copybutton python-sphinx-python-issues python-sphinx-python-removed-python-in python-sphinx-python-rtd-python-theme python-sphinxext-python-opengraph python-check-python-manifest python-coverage python-defusedxml python-markdown2 python-olefile python-packaging python-pyroma python-pytest python-pytest-python-cov python-pytest-python-timeout"
+
+PKG_VER=9.1.1
+SOURCE=https://files.pythonhosted.org/packages/source/p/pillow/pillow-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Python Imaging Library (Fork)"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-ply/python-ply.xibuild b/repo/python-ply/python-ply.xibuild
new file mode 100644
index 0000000..f977f12
--- /dev/null
+++ b/repo/python-ply/python-ply.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=3.11
+SOURCE=https://files.pythonhosted.org/packages/source/p/ply/ply-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Python Lex DESC="%desc%" Yacc"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-poetry-core/python-poetry-core.xibuild b/repo/python-poetry-core/python-poetry-core.xibuild
new file mode 100644
index 0000000..0a613bc
--- /dev/null
+++ b/repo/python-poetry-core/python-poetry-core.xibuild
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+NAME="python-poetry-core"
+DESC="PEP 517 build backend implementation for Poetry"
+
+MAKEDEPS="python-pep517 python-build python-installer "
+
+PKG_VER=1.0.8
+SOURCE="https://github.com/python-poetry/poetry-core/archive/$PKG_VER.tar.gz"
+
+build() {
+ # poetry-core excludes files specified in .gitignore
+ # fixed by setting GIT_DIR away from the aports tree's top-level directory
+ GIT_DIR="$PWD" python3 -m build --no-isolation --skip-dependency-check --wheel
+}
+
+package() {
+ python3 -m installer -d "$PKG_DEST" \
+ dist/poetry_core-$PKG_VER-py2.py3-none-any.whl
+
+ # remove vendored versions of installed modules
+ local sitedir=$(python3 -c "import site; print(site.getsitepackages()[0])")
+ rm -r "$PKG_DEST/$sitedir"/poetry/core/_vendor
+
+ install -Dm644 poetry/core/json/schemas/poetry-schema.json \
+ "$PKG_DEST/$sitedir"/poetry/core/json/schemas/poetry-schema.json
+ install -Dm644 poetry/core/spdx/data/licenses.json \
+ "$PKG_DEST/$sitedir"/poetry/core/spdx/data/licenses.json
+}
+
diff --git a/repo/python-py/python-py.xibuild b/repo/python-py/python-py.xibuild
new file mode 100644
index 0000000..c0c3f39
--- /dev/null
+++ b/repo/python-py/python-py.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=1.11.0
+SOURCE=https://files.pythonhosted.org/packages/source/p/py/py-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="library with cross-python path, ini-parsing, io, code, log facilities"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-pycryptodomex/python-pycryptodomex.xibuild b/repo/python-pycryptodomex/python-pycryptodomex.xibuild
new file mode 100644
index 0000000..f0397af
--- /dev/null
+++ b/repo/python-pycryptodomex/python-pycryptodomex.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=3.14.1
+SOURCE=https://files.pythonhosted.org/packages/source/p/pycryptodomex/pycryptodomex-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Cryptographic library for Python"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-pyopenssl/python-pyopenssl.xibuild b/repo/python-pyopenssl/python-pyopenssl.xibuild
new file mode 100644
index 0000000..1aca160
--- /dev/null
+++ b/repo/python-pyopenssl/python-pyopenssl.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-cryptography python-sphinx python-sphinx-python-rtd-python-theme python-flaky python-pretend python-pytest"
+
+PKG_VER=22.0.0
+SOURCE=https://files.pythonhosted.org/packages/source/p/pyopenssl/pyopenssl-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Python wrapper module around the OpenSSL library"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-pyqt-builder/python-pyqt-builder.xibuild b/repo/python-pyqt-builder/python-pyqt-builder.xibuild
new file mode 100644
index 0000000..a018bce
--- /dev/null
+++ b/repo/python-pyqt-builder/python-pyqt-builder.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-packaging python-sip"
+
+PKG_VER=1.13.0
+SOURCE=https://pypi.python.org/packages/source/P/PyQt-builder/PyQt-builder-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="The PEP 517 compliant PyQt build system"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-pyqt5-sip/python-pyqt5-sip.xibuild b/repo/python-pyqt5-sip/python-pyqt5-sip.xibuild
new file mode 100644
index 0000000..649155c
--- /dev/null
+++ b/repo/python-pyqt5-sip/python-pyqt5-sip.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=12.11.0
+SOURCE=https://pypi.python.org/packages/source/P/PyQt5-sip/PyQt5_sip-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="The sip module support for PyQt5"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-pyqt5/python-pyqt5.xibuild b/repo/python-pyqt5/python-pyqt5.xibuild
new file mode 100644
index 0000000..e4cee15
--- /dev/null
+++ b/repo/python-pyqt5/python-pyqt5.xibuild
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+MAKEDEPS="python python-sip"
+DEPS="pyqt5-sip"
+
+PKG_VER=5.15.7
+SOURCE=https://pypi.python.org/packages/source/P/PyQt5/PyQt5-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Python bindings for the Qt cross platform application toolkit"
+
+build() {
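+ # --confirm-license accepts PyQt5's GPL license prompt non-interactively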
+ sip-build \
+ --confirm-license \
+ --qmake /usr/lib/qt5/bin/qmake \
+ --api-dir /usr/share/qt5/qsci/api/python \
+ --no-make
+ make -C build
+}
+
+
+package () {
+ make DESTDIR="$PKG_DEST" INSTALL_ROOT="$PKG_DEST" -C build install -j1
+}
diff --git a/repo/python-pyqtwebengine/python-pyqtwebengine.xibuild b/repo/python-pyqtwebengine/python-pyqtwebengine.xibuild
new file mode 100644
index 0000000..0889c02
--- /dev/null
+++ b/repo/python-pyqtwebengine/python-pyqtwebengine.xibuild
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+MAKEDEPS="python python-toml qt5-qtwebengine"
+DEPS="python"
+
+PKG_VER=5.15.5
+SOURCE="https://pypi.python.org/packages/source/P/PyQtWebEngine/PyQtWebEngine-$PKG_VER.tar.gz"
+MAKEDEPS="python"
+DESC="Python bindings for the Qt WebEngine framework"
+
+build() {
+ sip-build \
+ --no-make \
+ --qmake /usr/lib/qt5/bin/qmake \
+ --api-dir /usr/share/qt5/qsci/api/python
+ make -C build
+}
+
+
+check() {
+ make -C build check
+}
+
+package () {
+ make DESTDIR="$PKG_VER" INSTALL_ROOT="$PKG_VER" -C build install -j1
+}
diff --git a/repo/python-pyrsistent/python-pyrsistent.xibuild b/repo/python-pyrsistent/python-pyrsistent.xibuild
new file mode 100644
index 0000000..8734a95
--- /dev/null
+++ b/repo/python-pyrsistent/python-pyrsistent.xibuild
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+NAME="python-pyrsistent"
+DESC="Persistent/Functional/Immutable data structures"
+
+MAKEDEPS="python"
+
+PKG_VER=0.18.0
+SOURCE="https://github.com/tobgu/pyrsistent/archive/v$PKG_VER.tar.gz"
+
+prepare() {
+ apply_patches
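+ # strip the '<5' version caps from setup.py so newer test dependencies are accepted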
+ sed -i 's/<5//g' setup.py
+}
+
+build() {
+ python3 setup.py build
+}
+
+check() {
+ python3 setup.py test
+}
+
+package() {
+ python3 setup.py install --prefix=/usr --root="$PKG_DEST"
+}
+
diff --git a/repo/python-qrcode/python-qrcode.xibuild b/repo/python-qrcode/python-qrcode.xibuild
new file mode 100644
index 0000000..62c1228
--- /dev/null
+++ b/repo/python-qrcode/python-qrcode.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=7.3.1
+SOURCE=https://files.pythonhosted.org/packages/source/q/qrcode/qrcode-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="QR Code image generator"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-regex/python-regex.xibuild b/repo/python-regex/python-regex.xibuild
new file mode 100644
index 0000000..381fdbf
--- /dev/null
+++ b/repo/python-regex/python-regex.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=2022.6.2
+SOURCE=https://files.pythonhosted.org/packages/source/r/regex/regex-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Alternative regular expression module, to replace re."
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-sip/python-sip.xibuild b/repo/python-sip/python-sip.xibuild
new file mode 100644
index 0000000..9ec9c0d
--- /dev/null
+++ b/repo/python-sip/python-sip.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-packaging python-ply python-setuptools python-toml"
+
+PKG_VER=6.6.2
+SOURCE=https://files.pythonhosted.org/packages/source/s/sip/sip-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="A Python bindings generator for C/C++ libraries"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-sortedcontainers/python-sortedcontainers.xibuild b/repo/python-sortedcontainers/python-sortedcontainers.xibuild
new file mode 100644
index 0000000..1a3fab0
--- /dev/null
+++ b/repo/python-sortedcontainers/python-sortedcontainers.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=2.4.0
+SOURCE=https://files.pythonhosted.org/packages/source/s/sortedcontainers/sortedcontainers-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Sorted Containers -- Sorted List, Sorted Dict, Sorted Set"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-sphinx-rtd-theme/python-sphinx-rtd-theme.xibuild b/repo/python-sphinx-rtd-theme/python-sphinx-rtd-theme.xibuild
new file mode 100644
index 0000000..3b5c934
--- /dev/null
+++ b/repo/python-sphinx-rtd-theme/python-sphinx-rtd-theme.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-sphinx python-docutils python-transifex-python-client python-sphinxcontrib-python-httpdomain python-bump2version"
+
+PKG_VER=1.0.0
+SOURCE=https://files.pythonhosted.org/packages/source/s/sphinx_rtd_theme/sphinx_rtd_theme-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Read the Docs theme for Sphinx"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-testpath/python-testpath.xibuild b/repo/python-testpath/python-testpath.xibuild
new file mode 100644
index 0000000..3fececc
--- /dev/null
+++ b/repo/python-testpath/python-testpath.xibuild
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+NAME="python-testpath"
+DESC="test utilities for working with files and commands"
+
+MAKEDEPS=" python-build python-installer python-wheel"
+
+PKG_VER=0.6.0
+SOURCE="https://files.pythonhosted.org/packages/source/t/testpath/testpath-$PKG_VER.tar.gz"
+
+build() {
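+ # build a wheel with the PEP 517 build module; --no-isolation reuses the system-installed build deps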
+ python3 -m build --no-isolation --wheel
+}
+
+check() {
+ pytest
+}
+
+package() {
+ python3 -m installer -d "$PKG_DEST" \
+ dist/testpath-$PKG_VER-py3-none-any.whl
+}
+
diff --git a/repo/python-tomlkit/python-tomlkit.xibuild b/repo/python-tomlkit/python-tomlkit.xibuild
new file mode 100644
index 0000000..9132464
--- /dev/null
+++ b/repo/python-tomlkit/python-tomlkit.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+
+
+PKG_VER=0.11.0
+SOURCE=https://files.pythonhosted.org/packages/source/t/tomlkit/tomlkit-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="Style preserving TOML library"
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/python-webcolors/python-webcolors.xibuild b/repo/python-webcolors/python-webcolors.xibuild
new file mode 100644
index 0000000..f19886a
--- /dev/null
+++ b/repo/python-webcolors/python-webcolors.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+MAKEDEPS="python"
+DEPS="python-pygments"
+
+PKG_VER=1.12
+SOURCE=https://files.pythonhosted.org/packages/source/w/webcolors/webcolors-$PKG_VER.tar.gz
+MAKEDEPS="python"
+DESC="A library for working with color names and color values formats defined by HTML and CSS."
+
+build() {
+ python setup.py build
+}
+
+package () {
+ python setup.py install --root="$PKG_DEST" --optimize=1
+}
diff --git a/repo/qt5-qtwebchannel/qt5-qtwebchannel.xibuild b/repo/qt5-qtwebchannel/qt5-qtwebchannel.xibuild
new file mode 100644
index 0000000..38a023f
--- /dev/null
+++ b/repo/qt5-qtwebchannel/qt5-qtwebchannel.xibuild
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+NAME="qt5-qtwebchannel"
+DESC="library for seamless integration of C++ +and QML applications with HTML/JavaScript clients."
+
+MAKEDEPS="qt5-qtbase qt5-qtwebsockets qt5-qtdeclarative"
+
+PKG_VER=5.15.3_git20201028
+_commit="47be9a51b01d9fd9e7f6dca81e98d4eedcec6d38"
+SOURCE="https://invent.kde.org/qt/qt/qtwebchannel/-/archive/$_commit/qtwebchannel-$_commit.tar.gz"
+
+prepare() {
+ apply_patches
+
+ # We need to make the build system think we're running in a git repository
+ # so that it correctly creates symlinks during the build
+ mkdir .git
+}
+
+build() {
+ qmake-qt5
+ make
+}
+
+package() {
+ make INSTALL_ROOT="$PKG_DEST" install
+
+ # Drop QMAKE_PRL_BUILD_DIR because it references the build dir
+ find "$PKG_DEST/usr/lib" -type f -name '*.prl' \
+ -exec sed -i -e '/^QMAKE_PRL_BUILD_DIR/d' {} \;
+
+ install -d "$PKG_DEST"/usr/share/licenses
+ ln -s /usr/share/licenses/qt5-base "$PKG_DEST"/usr/share/licenses/qt5-qtwebchannel
+}
diff --git a/repo/qt5-qtwebengine/0001-pretend-to-stay-at-5.15.3.patch b/repo/qt5-qtwebengine/0001-pretend-to-stay-at-5.15.3.patch
new file mode 100644
index 0000000..4b2d9fa
--- /dev/null
+++ b/repo/qt5-qtwebengine/0001-pretend-to-stay-at-5.15.3.patch
@@ -0,0 +1,8 @@
+--- a/.qmake.conf
++++ b/.qmake.conf
+@@ -5,4 +5,4 @@ QTWEBENGINE_OUT_ROOT = $$shadowed($$PWD)
+ load(qt_build_config)
+ CONFIG += warning_clean
+
+-MODULE_VERSION = 5.15.10
++MODULE_VERSION = 5.15.3
diff --git a/repo/qt5-qtwebengine/0010-chromium-musl-Match-syscalls-to-match-musl.patch b/repo/qt5-qtwebengine/0010-chromium-musl-Match-syscalls-to-match-musl.patch
new file mode 100644
index 0000000..524d75d
--- /dev/null
+++ b/repo/qt5-qtwebengine/0010-chromium-musl-Match-syscalls-to-match-musl.patch
@@ -0,0 +1,44 @@
+From ce23b6a6e5a5ebae15dedeebf7044ac9a0249a80 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 7 Jul 2017 15:24:49 -0700
+Subject: [PATCH] chromium: musl: Match syscalls to match musl
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ chromium/third_party/lss/linux_syscall_support.h | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h b/chromium/third_party/lss/linux_syscall_support.h
+index 80a3e56..1e57b1a 100644
+--- a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h
++++ b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h
+@@ -824,6 +824,14 @@ struct kernel_statfs {
+ #endif
+
+
++#undef stat64
++#undef fstat64
++
++#ifndef __NR_fstatat
++#define __NR_fstatat __NR_fstatat64
++#endif
++
++
+ #if defined(__x86_64__)
+ #ifndef ARCH_SET_GS
+ #define ARCH_SET_GS 0x1001
+@@ -1258,6 +1266,14 @@ struct kernel_statfs {
+ #ifndef __NR_getrandom
+ #define __NR_getrandom 318
+ #endif
++
++#ifndef __NR_pread
++#define __NR_pread __NR_pread64
++#endif
++#ifndef __NR_pwrite
++#define __NR_pwrite __NR_pwrite64
++#endif
++
+ /* End of x86-64 definitions */
+ #elif defined(__mips__)
+ #if _MIPS_SIM == _MIPS_SIM_ABI32
diff --git a/repo/qt5-qtwebengine/default-pthread-stacksize.patch b/repo/qt5-qtwebengine/default-pthread-stacksize.patch
new file mode 100644
index 0000000..e0ca792
--- /dev/null
+++ b/repo/qt5-qtwebengine/default-pthread-stacksize.patch
@@ -0,0 +1,23 @@
+--- ./src/3rdparty/chromium/base/threading/platform_thread_linux.cc
++++ ./src/3rdparty/chromium/base/threading/platform_thread_linux.cc
+@@ -186,7 +186,8 @@
+
+ size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+ #if !defined(THREAD_SANITIZER)
+- return 0;
++ // use 2 MB to avoid running out of stack space; this is what Android uses
++ return 2 * (1 << 20);
+ #else
+ // ThreadSanitizer bloats the stack heavily. Evidence has been that the
+ // default stack size isn't enough for some browser tests.
+--- ./src/3rdparty/chromium/base/threading/platform_thread_unittest.cc.orig
++++ ./src/3rdparty/chromium/base/threading/platform_thread_unittest.cc
+@@ -411,7 +411,7 @@
+ ((defined(OS_LINUX) || defined(OS_CHROMEOS)) && \
+ !defined(THREAD_SANITIZER)) || \
+ (defined(OS_ANDROID) && !defined(ADDRESS_SANITIZER))
+- EXPECT_EQ(0u, stack_size);
++ EXPECT_EQ(2u << 20, stack_size);
+ #else
+ EXPECT_GT(stack_size, 0u);
+ EXPECT_LT(stack_size, 20u * (1 << 20));
diff --git a/repo/qt5-qtwebengine/ffmpeg5.patch b/repo/qt5-qtwebengine/ffmpeg5.patch
new file mode 100644
index 0000000..ea721d1
--- /dev/null
+++ b/repo/qt5-qtwebengine/ffmpeg5.patch
@@ -0,0 +1,151 @@
+Patch-Source: https://github.com/archlinux/svntogit-packages/blob/e8ab98ca62f23ee9633111596977c55ece224d2c/trunk/qt5-webengine-ffmpeg5.patch
+diff --git a/chromium/media/ffmpeg/ffmpeg_common.h b/chromium/media/ffmpeg/ffmpeg_common.h
+index 2734a485cbd..70b1877a43c 100644
+--- a/src/3rdparty/chromium/media/ffmpeg/ffmpeg_common.h
++++ b/src/3rdparty/chromium/media/ffmpeg/ffmpeg_common.h
+@@ -29,6 +29,7 @@ extern "C" {
+ #include <libavformat/avformat.h>
+ #include <libavformat/avio.h>
+ #include <libavutil/avutil.h>
++#include <libavutil/channel_layout.h>
+ #include <libavutil/imgutils.h>
+ #include <libavutil/log.h>
+ #include <libavutil/mastering_display_metadata.h>
+diff --git a/chromium/media/filters/audio_file_reader.cc b/chromium/media/filters/audio_file_reader.cc
+index cb81d920def..bd73908d0ca 100644
+--- a/src/3rdparty/chromium/media/filters/audio_file_reader.cc
++++ b/src/3rdparty/chromium/media/filters/audio_file_reader.cc
+@@ -85,7 +85,7 @@ bool AudioFileReader::OpenDemuxer() {
+ }
+
+ bool AudioFileReader::OpenDecoder() {
+- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
++ const AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+ if (codec) {
+ // MP3 decodes to S16P which we don't support, tell it to use S16 instead.
+ if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P)
+diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
+index 0d825ed791b..72fac6167ef 100644
+--- a/src/3rdparty/chromium/media/filters/ffmpeg_audio_decoder.cc
++++ b/src/3rdparty/chromium/media/filters/ffmpeg_audio_decoder.cc
+@@ -329,7 +329,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder(const AudioDecoderConfig& config) {
+ }
+ }
+
+- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
++ const AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+ if (!codec ||
+ avcodec_open2(codec_context_.get(), codec, &codec_options) < 0) {
+ DLOG(ERROR) << "Could not initialize audio decoder: "
+diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
+index d34db63f3ef..427565b00c1 100644
+--- a/src/3rdparty/chromium/media/filters/ffmpeg_demuxer.cc
++++ b/src/3rdparty/chromium/media/filters/ffmpeg_demuxer.cc
+@@ -98,12 +98,12 @@ static base::TimeDelta ExtractStartTime(AVStream* stream) {
+
+ // Next try to use the first DTS value, for codecs where we know PTS == DTS
+ // (excludes all H26x codecs). The start time must be returned in PTS.
+- if (stream->first_dts != kNoFFmpegTimestamp &&
++ if (av_stream_get_first_dts(stream) != kNoFFmpegTimestamp &&
+ stream->codecpar->codec_id != AV_CODEC_ID_HEVC &&
+ stream->codecpar->codec_id != AV_CODEC_ID_H264 &&
+ stream->codecpar->codec_id != AV_CODEC_ID_MPEG4) {
+ const base::TimeDelta first_pts =
+- ConvertFromTimeBase(stream->time_base, stream->first_dts);
++ ConvertFromTimeBase(stream->time_base, av_stream_get_first_dts(stream));
+ if (first_pts < start_time)
+ start_time = first_pts;
+ }
+@@ -408,11 +408,11 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
+ scoped_refptr<DecoderBuffer> buffer;
+
+ if (type() == DemuxerStream::TEXT) {
+- int id_size = 0;
++ size_t id_size = 0;
+ uint8_t* id_data = av_packet_get_side_data(
+ packet.get(), AV_PKT_DATA_WEBVTT_IDENTIFIER, &id_size);
+
+- int settings_size = 0;
++ size_t settings_size = 0;
+ uint8_t* settings_data = av_packet_get_side_data(
+ packet.get(), AV_PKT_DATA_WEBVTT_SETTINGS, &settings_size);
+
+@@ -424,7 +424,7 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
+ buffer = DecoderBuffer::CopyFrom(packet->data, packet->size,
+ side_data.data(), side_data.size());
+ } else {
+- int side_data_size = 0;
++ size_t side_data_size = 0;
+ uint8_t* side_data = av_packet_get_side_data(
+ packet.get(), AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, &side_data_size);
+
+@@ -485,7 +485,7 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
+ packet->size - data_offset);
+ }
+
+- int skip_samples_size = 0;
++ size_t skip_samples_size = 0;
+ const uint32_t* skip_samples_ptr =
+ reinterpret_cast<const uint32_t*>(av_packet_get_side_data(
+ packet.get(), AV_PKT_DATA_SKIP_SAMPLES, &skip_samples_size));
+diff --git a/chromium/media/filters/ffmpeg_glue.cc b/chromium/media/filters/ffmpeg_glue.cc
+index 0ef3521473d..8483ecc348f 100644
+--- a/src/3rdparty/chromium/media/filters/ffmpeg_glue.cc
++++ b/src/3rdparty/chromium/media/filters/ffmpeg_glue.cc
+@@ -59,7 +59,6 @@ static int64_t AVIOSeekOperation(void* opaque, int64_t offset, int whence) {
+ }
+
+ void FFmpegGlue::InitializeFFmpeg() {
+- av_register_all();
+ }
+
+ static void LogContainer(bool is_local_file,
+@@ -95,9 +94,6 @@ FFmpegGlue::FFmpegGlue(FFmpegURLProtocol* protocol) {
+ // Enable fast, but inaccurate seeks for MP3.
+ format_context_->flags |= AVFMT_FLAG_FAST_SEEK;
+
+- // Ensures we can read out various metadata bits like vp8 alpha.
+- format_context_->flags |= AVFMT_FLAG_KEEP_SIDE_DATA;
+-
+ // Ensures format parsing errors will bail out. From an audit on 11/2017, all
+ // instances were real failures. Solves bugs like http://crbug.com/710791.
+ format_context_->error_recognition |= AV_EF_EXPLODE;
+diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
+index ef12477ee89..7996606f5f9 100644
+--- a/src/3rdparty/chromium/media/filters/ffmpeg_video_decoder.cc
++++ b/src/3rdparty/chromium/media/filters/ffmpeg_video_decoder.cc
+@@ -391,7 +391,7 @@ bool FFmpegVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config,
+ if (decode_nalus_)
+ codec_context_->flags2 |= AV_CODEC_FLAG2_CHUNKS;
+
+- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
++ const AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+ if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
+ ReleaseFFmpegResources();
+ return false;
+diff --git a/chromium/media/filters/media_file_checker.cc b/chromium/media/filters/media_file_checker.cc
+index 59c2a2fc618..1a9872c7acb 100644
+--- a/src/3rdparty/chromium/media/filters/media_file_checker.cc
++++ b/src/3rdparty/chromium/media/filters/media_file_checker.cc
+@@ -68,7 +68,7 @@ bool MediaFileChecker::Start(base::TimeDelta check_time) {
+ auto context = AVStreamToAVCodecContext(format_context->streams[i]);
+ if (!context)
+ continue;
+- AVCodec* codec = avcodec_find_decoder(cp->codec_id);
++ const AVCodec* codec = avcodec_find_decoder(cp->codec_id);
+ if (codec && avcodec_open2(context.get(), codec, nullptr) >= 0) {
+ auto loop = std::make_unique<FFmpegDecodingLoop>(context.get());
+ stream_contexts[i] = {std::move(context), std::move(loop)};
+diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+index 9002b874611..d12fade8b63 100644
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+@@ -203,7 +203,7 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
+ // a pointer |this|.
+ av_context_->opaque = this;
+
+- AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
++ const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
+ if (!codec) {
+ // This is an indication that FFmpeg has not been initialized or it has not
+ // been compiled/initialized with the correct set of codecs.
diff --git a/repo/qt5-qtwebengine/fix-chromium-build.patch b/repo/qt5-qtwebengine/fix-chromium-build.patch
new file mode 100644
index 0000000..8b26238
--- /dev/null
+++ b/repo/qt5-qtwebengine/fix-chromium-build.patch
@@ -0,0 +1,79 @@
+diff --git a/src/3rdparty/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/src/3rdparty/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+index 1c02aa69a..69e5e58de 100644
+--- a/src/3rdparty/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
++++ b/src/3rdparty/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+@@ -375,6 +375,74 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
+ UNREACHABLE();
+ }
+
++class WasmOutOfLineTrap : public OutOfLineCode {
++ public:
++ WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
++ : OutOfLineCode(gen), gen_(gen), instr_(instr) {}
++ void Generate() override {
++ Arm64OperandConverter i(gen_, instr_);
++ TrapId trap_id =
++ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
++ GenerateCallToTrap(trap_id);
++ }
++
++ protected:
++ CodeGenerator* gen_;
++
++ void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
++
++ private:
++ void GenerateCallToTrap(TrapId trap_id) {
++ if (trap_id == TrapId::kInvalid) {
++ // We cannot test calls to the runtime in cctest/test-run-wasm.
++ // Therefore we emit a call to C here instead of a call to the runtime.
++ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
++ 0);
++ __ LeaveFrame(StackFrame::WASM);
++ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
++ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
++ pop_count += (pop_count & 1); // align
++ __ Drop(pop_count);
++ __ Ret();
++ } else {
++ gen_->AssembleSourcePosition(instr_);
++ // A direct call to a wasm runtime stub defined in this module.
++ // Just encode the stub index. This will be patched when the code
++ // is added to the native module and copied into wasm code space.
++ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
++ ReferenceMap* reference_map =
++ gen_->zone()->New<ReferenceMap>(gen_->zone());
++ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
++ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
++ }
++ }
++
++ Instruction* instr_;
++};
++
++class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
++ public:
++ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
++ : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
++
++ void Generate() override {
++ gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
++ GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
++ }
++
++ private:
++ int pc_;
++};
++
++void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
++ InstructionCode opcode, Instruction* instr, int pc) {
++ const MemoryAccessMode access_mode =
++ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
++ if (access_mode == kMemoryAccessProtected) {
++ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
++ }
++}
++
+ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter const& i) {
diff --git a/repo/qt5-qtwebengine/musl-hacks.patch b/repo/qt5-qtwebengine/musl-hacks.patch
new file mode 100644
index 0000000..3c94b19
--- /dev/null
+++ b/repo/qt5-qtwebengine/musl-hacks.patch
@@ -0,0 +1,13 @@
+diff --git a/src/3rdparty/chromium/net/socket/udp_socket_posix.cc b/src/3rdparty/chromium/net/socket/udp_socket_posix.cc
+index dbc8c5aaf..077bbde33 100644
+--- a/src/3rdparty/chromium/net/socket/udp_socket_posix.cc
++++ b/src/3rdparty/chromium/net/socket/udp_socket_posix.cc
+@@ -1152,7 +1152,7 @@ SendResult UDPSocketPosixSender::InternalSendmmsgBuffers(
+ msg_iov->push_back({const_cast<char*>(buffer->data()), buffer->length()});
+ msgvec->reserve(buffers.size());
+ for (size_t j = 0; j < buffers.size(); j++)
+- msgvec->push_back({{nullptr, 0, &msg_iov[j], 1, nullptr, 0, 0}, 0});
++ msgvec->push_back({{nullptr, 0, &msg_iov[j], 1, 0, 0, 0}, 0});
+ int result = HANDLE_EINTR(Sendmmsg(fd, &msgvec[0], buffers.size(), 0));
+ SendResult send_result(0, 0, std::move(buffers));
+ if (result < 0) {
diff --git a/repo/qt5-qtwebengine/musl-sandbox.patch b/repo/qt5-qtwebengine/musl-sandbox.patch
new file mode 100644
index 0000000..ad01ea8
--- /dev/null
+++ b/repo/qt5-qtwebengine/musl-sandbox.patch
@@ -0,0 +1,181 @@
+diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+index 348ab6e8c..2eac6ef82 100644
+--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+@@ -127,21 +127,11 @@ namespace sandbox {
+ // present (as in newer versions of posix_spawn).
+ ResultExpr RestrictCloneToThreadsAndEPERMFork() {
+ const Arg<unsigned long> flags(0);
+-
+- // TODO(mdempsky): Extend DSL to support (flags & ~mask1) == mask2.
+- const uint64_t kAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
+- CLONE_SIGHAND | CLONE_THREAD |
+- CLONE_SYSVSEM;
+- const uint64_t kObsoleteAndroidCloneMask = kAndroidCloneMask | CLONE_DETACHED;
+-
+- const uint64_t kGlibcPthreadFlags =
+- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
+- CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
+- const BoolExpr glibc_test = flags == kGlibcPthreadFlags;
+-
+- const BoolExpr android_test =
+- AnyOf(flags == kAndroidCloneMask, flags == kObsoleteAndroidCloneMask,
+- flags == kGlibcPthreadFlags);
++ const int required = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
++ CLONE_THREAD | CLONE_SYSVSEM;
++ const int safe = CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID |
++ CLONE_DETACHED;
++ const BoolExpr thread_clone_ok = (flags&~safe)==required;
+
+ // The following two flags are the two important flags in any vfork-emulating
+ // clone call. EPERM any clone call that contains both of them.
+@@ -151,7 +141,7 @@ ResultExpr RestrictCloneToThreadsAndEPERMFork() {
+ AnyOf((flags & (CLONE_VM | CLONE_THREAD)) == 0,
+ (flags & kImportantCloneVforkFlags) == kImportantCloneVforkFlags);
+
+- return If(IsAndroid() ? android_test : glibc_test, Allow())
++ return If(thread_clone_ok, Allow())
+ .ElseIf(is_fork_or_clone_vfork, Error(EPERM))
+ .Else(CrashSIGSYSClone());
+ }
+diff --git a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
+index 6e2bd4fee..9f9e4ad8a 100644
+--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
+@@ -392,6 +392,7 @@ bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) {
+ #if defined(__i386__)
+ case __NR_waitpid:
+ #endif
++ case __NR_set_tid_address:
+ return true;
+ case __NR_clone: // Should be parameter-restricted.
+ case __NR_setns: // Privileged.
+@@ -404,7 +405,6 @@ bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) {
+ #if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+ case __NR_set_thread_area:
+ #endif
+- case __NR_set_tid_address:
+ case __NR_unshare:
+ #if !defined(__mips__) && !defined(__aarch64__)
+ case __NR_vfork:
+@@ -514,6 +514,8 @@ bool SyscallSets::IsAllowedAddressSpaceAccess(int sysno) {
+ case __NR_mlock:
+ case __NR_munlock:
+ case __NR_munmap:
++ case __NR_mremap:
++ case __NR_membarrier:
+ return true;
+ case __NR_madvise:
+ case __NR_mincore:
+@@ -531,7 +533,6 @@ bool SyscallSets::IsAllowedAddressSpaceAccess(int sysno) {
+ case __NR_modify_ldt:
+ #endif
+ case __NR_mprotect:
+- case __NR_mremap:
+ case __NR_msync:
+ case __NR_munlockall:
+ case __NR_readahead:
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/arm64_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/arm64_linux_syscalls.h
+index 59d0eab8e..7ae700213 100644
+--- a/src/3rdparty/chromium/sandbox/linux/system_headers/arm64_linux_syscalls.h
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/arm64_linux_syscalls.h
+@@ -1119,4 +1119,8 @@
+ #define __NR_rseq 293
+ #endif
+
++#if !defined(__NR_membarrier)
++#define __NR_membarrier 283
++#endif
++
+ #endif // SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_SYSCALLS_H_
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/arm_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/arm_linux_syscalls.h
+index 1addd5384..d8811ce87 100644
+--- a/src/3rdparty/chromium/sandbox/linux/system_headers/arm_linux_syscalls.h
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/arm_linux_syscalls.h
+@@ -1441,6 +1441,11 @@
+ #define __NR_io_pgetevents (__NR_SYSCALL_BASE+399)
+ #endif
+
++#if !defined(__NR_membarrier)
++#define __NR_membarrier (__NR_SYSCALL_BASE+389)
++#endif
++
++
+ // ARM private syscalls.
+ #if !defined(__ARM_NR_BASE)
+ #define __ARM_NR_BASE (__NR_SYSCALL_BASE + 0xF0000)
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/mips64_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/mips64_linux_syscalls.h
+index ec75815a8..612fcfaa9 100644
+--- a/src/3rdparty/chromium/sandbox/linux/system_headers/mips64_linux_syscalls.h
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/mips64_linux_syscalls.h
+@@ -1271,4 +1271,8 @@
+ #define __NR_memfd_create (__NR_Linux + 314)
+ #endif
+
++#if !defined(__NR_membarrier)
++#define __NR_membarrier (__NR_Linux + 318)
++#endif
++
+ #endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS64_LINUX_SYSCALLS_H_
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/mips_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/mips_linux_syscalls.h
+index ddbf97f3d..1742acd4c 100644
+--- a/src/3rdparty/chromium/sandbox/linux/system_headers/mips_linux_syscalls.h
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/mips_linux_syscalls.h
+@@ -1433,4 +1433,8 @@
+ #define __NR_memfd_create (__NR_Linux + 354)
+ #endif
+
++#if !defined(__NR_membarrier)
++#define __NR_membarrier (__NR_Linux + 358)
++#endif
++
+ #endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/x86_32_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/x86_32_linux_syscalls.h
+index a6afc62d9..6ab7740de 100644
+--- a/src/3rdparty/chromium/sandbox/linux/system_headers/x86_32_linux_syscalls.h
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/x86_32_linux_syscalls.h
+@@ -1710,5 +1710,10 @@
+ #define __NR_clone3 435
+ #endif
+
++#if !defined(__NR_membarrier)
++#define __NR_membarrier 375
++#endif
++
++
+ #endif // SANDBOX_LINUX_SYSTEM_HEADERS_X86_32_LINUX_SYSCALLS_H_
+
+
+diff --git a/src/3rdparty/chromium/sandbox/linux/system_headers/x86_64_linux_syscalls.h b/src/3rdparty/chromium/sandbox/linux/system_headers/x86_64_linux_syscalls.h
+index 349504aee..6a6d4756f 100644
+--- a/src/3rdparty/chromium/sandbox/linux/system_headers/x86_64_linux_syscalls.h
++++ b/src/3rdparty/chromium/sandbox/linux/system_headers/x86_64_linux_syscalls.h
+@@ -1350,5 +1350,10 @@
+ #define __NR_rseq 334
+ #endif
+
++#if !defined(__NR_membarrier)
++#define __NR_membarrier 324
++#endif
++
++
+ #endif // SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_SYSCALLS_H_
+
+diff --git a/src/3rdparty/chromium/sandbox/policy/linux/bpf_renderer_policy_linux.cc b/src/3rdparty/chromium/sandbox/policy/linux/bpf_renderer_policy_linux.cc
+index 9fe9575eb..fa1a946f6 100644
+--- a/src/3rdparty/chromium/sandbox/policy/linux/bpf_renderer_policy_linux.cc
++++ b/src/3rdparty/chromium/sandbox/policy/linux/bpf_renderer_policy_linux.cc
+@@ -93,11 +93,11 @@ ResultExpr RendererProcessPolicy::EvaluateSyscall(int sysno) const {
+ case __NR_sysinfo:
+ case __NR_times:
+ case __NR_uname:
+- return Allow();
+- case __NR_sched_getaffinity:
+ case __NR_sched_getparam:
+ case __NR_sched_getscheduler:
+ case __NR_sched_setscheduler:
++ return Allow();
++ case __NR_sched_getaffinity:
+ return RestrictSchedTarget(GetPolicyPid(), sysno);
+ case __NR_prlimit64:
+ // See crbug.com/662450 and setrlimit comment above.
diff --git a/repo/qt5-qtwebengine/nasm.patch b/repo/qt5-qtwebengine/nasm.patch
new file mode 100644
index 0000000..1d58af6
--- /dev/null
+++ b/repo/qt5-qtwebengine/nasm.patch
@@ -0,0 +1,13 @@
+diff --git a/src/3rdparty/chromium/third_party/nasm/config/config-linux.h b/src/3rdparty/chromium/third_party/nasm/config/config-linux.h
+index 7eb7c20ff..3bdc2eb39 100644
+--- a/src/3rdparty/chromium/third_party/nasm/config/config-linux.h
++++ b/src/3rdparty/chromium/third_party/nasm/config/config-linux.h
+@@ -139,7 +139,7 @@
+ #define HAVE_ACCESS 1
+
+ /* Define to 1 if you have the `canonicalize_file_name' function. */
+-#define HAVE_CANONICALIZE_FILE_NAME 1
++// #define HAVE_CANONICALIZE_FILE_NAME 1
+
+ /* Define to 1 if you have the `cpu_to_le16' intrinsic function. */
+ /* #undef HAVE_CPU_TO_LE16 */
diff --git a/repo/qt5-qtwebengine/qt-chromium-python3.patch b/repo/qt5-qtwebengine/qt-chromium-python3.patch
new file mode 100644
index 0000000..41fcb7e
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-chromium-python3.patch
@@ -0,0 +1,1752 @@
+diff -upr b/src/3rdparty/chromium/build/print_python_deps.py a/src/3rdparty/chromium/build/print_python_deps.py
+--- b/src/3rdparty/chromium/build/print_python_deps.py 2022-03-08 18:21:43.709419882 +0100
++++ a/src/3rdparty/chromium/build/print_python_deps.py 2022-03-08 18:23:26.572870672 +0100
+@@ -1,4 +1,4 @@
+-#!/usr/bin/python2.7
++#!/usr/bin/python
+ # Copyright 2016 The Chromium Authors. All rights reserved.
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+@@ -80,7 +80,7 @@ def _GetTargetPythonVersion(module):
+ if shebang.startswith('#!'):
+ # Examples:
+ # '#!/usr/bin/python'
+- # '#!/usr/bin/python2.7'
++ # '#!/usr/bin/python'
+ # '#!/usr/bin/python3'
+ # '#!/usr/bin/env python3'
+ # '#!/usr/bin/env vpython'
+@@ -152,7 +152,7 @@ def main():
+
+ # Trybots run with vpython as default Python, but with a different config
+ # from //.vpython. To make the is_vpython test work, and to match the behavior
+- # of dev machines, the shebang line must be run with python2.7.
++ # of dev machines, the shebang line must be run with python.
+ #
+ # E.g. $HOME/.vpython-root/dd50d3/bin/python
+ # E.g. /b/s/w/ir/cache/vpython/ab5c79/bin/python
+diff -upr b/src/3rdparty/chromium/components/resources/protobufs/binary_proto_generator.py a/src/3rdparty/chromium/components/resources/protobufs/binary_proto_generator.py
+--- b/src/3rdparty/chromium/components/resources/protobufs/binary_proto_generator.py 2022-03-08 18:21:44.576087412 +0100
++++ a/src/3rdparty/chromium/components/resources/protobufs/binary_proto_generator.py 2022-03-08 18:23:26.572870672 +0100
+@@ -7,7 +7,7 @@
+ Converts a given ASCII proto into a binary resource.
+
+ """
+-
++from __future__ import print_function
+ import abc
+ import imp
+ import optparse
+@@ -196,12 +196,12 @@ class BinaryProtoGenerator:
+ self._ImportProtoModules(opts.path)
+
+ if not self.VerifyArgs(opts):
+- print "Wrong arguments"
++ print("Wrong arguments")
+ return 1
+
+ try:
+ self._GenerateBinaryProtos(opts)
+ except Exception as e:
+- print "ERROR: Failed to render binary version of %s:\n %s\n%s" % (
+- opts.infile, str(e), traceback.format_exc())
++ print("ERROR: Failed to render binary version of %s:\n %s\n%s" %
++ (opts.infile, str(e), traceback.format_exc()))
+ return 1
+diff -upr b/src/3rdparty/chromium/content/browser/tracing/generate_trace_viewer_grd.py a/src/3rdparty/chromium/content/browser/tracing/generate_trace_viewer_grd.py
+--- b/src/3rdparty/chromium/content/browser/tracing/generate_trace_viewer_grd.py 2022-03-08 18:21:44.936087771 +0100
++++ a/src/3rdparty/chromium/content/browser/tracing/generate_trace_viewer_grd.py 2022-03-08 18:23:26.572870672 +0100
+@@ -74,7 +74,7 @@ def main(argv):
+ for filename in parsed_args.source_files:
+ add_file_to_grd(doc, os.path.basename(filename))
+
+- with open(parsed_args.output_filename, 'w') as output_file:
++ with open(parsed_args.output_filename, 'wb') as output_file:
+ output_file.write(doc.toxml(encoding='UTF-8'))
+
+
+diff -upr b/src/3rdparty/chromium/mojo/public/tools/bindings/BUILD.gn a/src/3rdparty/chromium/mojo/public/tools/bindings/BUILD.gn
+--- b/src/3rdparty/chromium/mojo/public/tools/bindings/BUILD.gn 2022-03-08 18:21:45.622755122 +0100
++++ a/src/3rdparty/chromium/mojo/public/tools/bindings/BUILD.gn 2022-03-08 18:23:26.572870672 +0100
+@@ -2,9 +2,11 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
++import("//build/config/python.gni")
+ import("//mojo/public/tools/bindings/mojom.gni")
+ import("//third_party/jinja2/jinja2.gni")
+
++# TODO(crbug.com/1194274): Investigate nondeterminism in Py3 builds.
+ action("precompile_templates") {
+ sources = mojom_generator_sources
+ sources += [
+diff -upr b/src/3rdparty/chromium/mojo/public/tools/bindings/gen_data_files_list.py a/src/3rdparty/chromium/mojo/public/tools/bindings/gen_data_files_list.py
+--- b/src/3rdparty/chromium/mojo/public/tools/bindings/gen_data_files_list.py 2022-03-08 18:21:45.622755122 +0100
++++ a/src/3rdparty/chromium/mojo/public/tools/bindings/gen_data_files_list.py 2022-03-08 18:23:26.572870672 +0100
+@@ -18,7 +18,6 @@ import os
+ import re
+ import sys
+
+-from cStringIO import StringIO
+ from optparse import OptionParser
+
+ sys.path.insert(
+@@ -41,12 +40,9 @@ def main():
+ pattern = re.compile(options.pattern)
+ files = [f for f in os.listdir(options.directory) if pattern.match(f)]
+
+- stream = StringIO()
+- for f in files:
+- print(f, file=stream)
++ contents = '\n'.join(f for f in files) + '\n'
++ WriteFile(contents, options.output)
+
+- WriteFile(stream.getvalue(), options.output)
+- stream.close()
+
+ if __name__ == '__main__':
+ sys.exit(main())
+diff -upr b/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_java_generator.py a/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_java_generator.py
+--- b/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_java_generator.py 2022-03-08 18:21:45.629421795 +0100
++++ a/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_java_generator.py 2022-03-08 18:23:26.572870672 +0100
+@@ -25,6 +25,10 @@ sys.path.append(os.path.join(os.path.dir
+ 'build', 'android', 'gyp'))
+ from util import build_utils
+
++# TODO(crbug.com/1174969): Remove this once Python2 is obsoleted.
++if sys.version_info.major != 2:
++ basestring = str
++ long = int
+
+ GENERATOR_PREFIX = 'java'
+
+diff -upr b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator.py a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator.py
+--- b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator.py 2022-03-08 18:21:45.632755132 +0100
++++ a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator.py 2022-03-08 18:23:26.572870672 +0100
+@@ -136,9 +136,14 @@ class Stylizer(object):
+
+ def WriteFile(contents, full_path):
+ # If |contents| is same with the file content, we skip updating.
++ if not isinstance(contents, bytes):
++ data = contents.encode('utf8')
++ else:
++ data = contents
++
+ if os.path.isfile(full_path):
+ with open(full_path, 'rb') as destination_file:
+- if destination_file.read() == contents:
++ if destination_file.read() == data:
+ return
+
+ # Make sure the containing directory exists.
+@@ -146,11 +151,8 @@ def WriteFile(contents, full_path):
+ fileutil.EnsureDirectoryExists(full_dir)
+
+ # Dump the data to disk.
+- with open(full_path, "wb") as f:
+- if not isinstance(contents, bytes):
+- f.write(contents.encode('utf-8'))
+- else:
+- f.write(contents)
++ with open(full_path, 'wb') as f:
++ f.write(data)
+
+
+ def AddComputedData(module):
+diff -upr b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module.py a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module.py
+--- b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module.py 2022-03-08 18:21:45.632755132 +0100
++++ a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module.py 2022-03-08 18:23:26.572870672 +0100
+@@ -398,7 +398,8 @@ class Field(object):
+
+
+ class StructField(Field):
+- pass
++ def __hash__(self):
++ return super(Field, self).__hash__()
+
+
+ class UnionField(Field):
+diff -upr b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/template_expander.py a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/template_expander.py
+--- b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/template_expander.py 2022-03-08 18:21:45.632755132 +0100
++++ a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/template_expander.py 2022-03-08 18:23:26.572870672 +0100
+@@ -75,9 +75,9 @@ def PrecompileTemplates(generator_module
+ os.path.dirname(module.__file__), generator.GetTemplatePrefix())
+ ]))
+ jinja_env.filters.update(generator.GetFilters())
+- jinja_env.compile_templates(
+- os.path.join(output_dir, "%s.zip" % generator.GetTemplatePrefix()),
+- extensions=["tmpl"],
+- zip="stored",
+- py_compile=True,
+- ignore_errors=False)
++ jinja_env.compile_templates(os.path.join(
++ output_dir, "%s.zip" % generator.GetTemplatePrefix()),
++ extensions=["tmpl"],
++ zip="stored",
++ py_compile=sys.version_info.major < 3,
++ ignore_errors=False)
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/blink_v8_bridge.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/blink_v8_bridge.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/blink_v8_bridge.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/blink_v8_bridge.py 2022-03-08 18:23:26.576204010 +0100
+@@ -344,7 +344,7 @@ def make_default_value_expr(idl_type, de
+ """
+ assert default_value.is_type_compatible_with(idl_type)
+
+- class DefaultValueExpr:
++ class DefaultValueExpr(object):
+ _ALLOWED_SYMBOLS_IN_DEPS = ("isolate")
+
+ def __init__(self, initializer_expr, initializer_deps,
+@@ -502,7 +502,7 @@ def make_v8_to_blink_value(blink_var_nam
+ assert isinstance(blink_var_name, str)
+ assert isinstance(v8_value_expr, str)
+ assert isinstance(idl_type, web_idl.IdlType)
+- assert (argument_index is None or isinstance(argument_index, (int, long)))
++ assert (argument_index is None or isinstance(argument_index, int))
+ assert (default_value is None
+ or isinstance(default_value, web_idl.LiteralConstant))
+
+@@ -622,7 +622,7 @@ def make_v8_to_blink_value_variadic(blin
+ """
+ assert isinstance(blink_var_name, str)
+ assert isinstance(v8_array, str)
+- assert isinstance(v8_array_start_index, (int, long))
++ assert isinstance(v8_array_start_index, int)
+ assert isinstance(idl_type, web_idl.IdlType)
+
+ pattern = ("auto&& ${{{_1}}} = "
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/callback_interface.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/callback_interface.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/callback_interface.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/callback_interface.py 2022-03-08 18:23:26.576204010 +0100
+@@ -177,7 +177,7 @@ def generate_callback_interface(callback
+ prop_install_mode=PropInstallMode.UNCONDITIONAL,
+ trampoline_var_name=None,
+ attribute_entries=[],
+- constant_entries=filter(is_unconditional, constant_entries),
++ constant_entries=list(filter(is_unconditional, constant_entries)),
+ exposed_construct_entries=[],
+ operation_entries=[])
+ (install_interface_template_decl, install_interface_template_def,
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node.py 2022-03-08 18:23:26.576204010 +0100
+@@ -503,13 +503,13 @@ class CompositeNode(CodeNode):
+ gensym_kwargs = {}
+ template_vars = {}
+ for arg in args:
+- assert isinstance(arg, (CodeNode, int, long, str))
++ assert isinstance(arg, (CodeNode, int, str))
+ gensym = CodeNode.gensym()
+ gensym_args.append("${{{}}}".format(gensym))
+ template_vars[gensym] = arg
+ for key, value in kwargs.items():
+- assert isinstance(key, (int, long, str))
+- assert isinstance(value, (CodeNode, int, long, str))
++ assert isinstance(key, (int, str))
++ assert isinstance(value, (CodeNode, int, str))
+ gensym = CodeNode.gensym()
+ gensym_kwargs[key] = "${{{}}}".format(gensym)
+ template_vars[gensym] = value
+@@ -602,7 +602,7 @@ class ListNode(CodeNode):
+ def insert(self, index, node):
+ if node is None:
+ return
+- assert isinstance(index, (int, long))
++ assert isinstance(index, int)
+ assert isinstance(node, CodeNode)
+ assert node.outer is None and node.prev is None
+
+@@ -721,7 +721,7 @@ class SymbolScopeNode(SequenceNode):
+ if not scope_chains:
+ return counts
+
+- self_index = iter(scope_chains).next().index(self)
++ self_index = next(iter(scope_chains)).index(self)
+ scope_chains = map(
+ lambda scope_chain: scope_chain[self_index + 1:], scope_chains)
+ scope_to_likeliness = {}
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py 2022-03-08 18:23:26.576204010 +0100
+@@ -109,7 +109,7 @@ def expr_and(terms):
+
+ if any(term.is_always_false for term in terms):
+ return _Expr(False)
+- terms = filter(lambda x: not x.is_always_true, terms)
++ terms = list(filter(lambda x: not x.is_always_true, terms))
+ if not terms:
+ return _Expr(True)
+ if len(terms) == 1:
+@@ -124,7 +124,7 @@ def expr_or(terms):
+
+ if any(term.is_always_true for term in terms):
+ return _Expr(True)
+- terms = filter(lambda x: not x.is_always_false, terms)
++ terms = list(filter(lambda x: not x.is_always_false, terms))
+ if not terms:
+ return _Expr(False)
+ if len(terms) == 1:
+@@ -222,7 +222,7 @@ def expr_from_exposure(exposure,
+ elif exposure.only_in_secure_contexts is False:
+ secure_context_term = _Expr(True)
+ else:
+- terms = map(ref_enabled, exposure.only_in_secure_contexts)
++ terms = list(map(ref_enabled, exposure.only_in_secure_contexts))
+ secure_context_term = expr_or(
+ [_Expr("${is_in_secure_context}"),
+ expr_not(expr_and(terms))])
+@@ -275,10 +275,11 @@ def expr_from_exposure(exposure,
+
+ # [ContextEnabled]
+ if exposure.context_enabled_features:
+- terms = map(
+- lambda feature: _Expr(
+- "${{context_feature_settings}}->is{}Enabled()".format(
+- feature)), exposure.context_enabled_features)
++ terms = list(
++ map(
++ lambda feature: _Expr(
++ "${{context_feature_settings}}->is{}Enabled()".format(
++ feature)), exposure.context_enabled_features))
+ context_enabled_terms.append(
+ expr_and([_Expr("${context_feature_settings}"),
+ expr_or(terms)]))
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_format.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_format.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_format.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_format.py 2022-03-08 18:23:26.576204010 +0100
+@@ -23,7 +23,7 @@ class _TemplateFormatter(string.Formatte
+ self._template_formatter_indexing_count_ = 0
+
+ def get_value(self, key, args, kwargs):
+- if isinstance(key, (int, long)):
++ if isinstance(key, int):
+ return args[key]
+ assert isinstance(key, str)
+ if not key:
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_utils.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_utils.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_utils.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_utils.py 2022-03-08 18:23:26.576204010 +0100
+@@ -116,4 +116,4 @@ def write_code_node_to_file(code_node, f
+ # stderr=format_result.error_message))
+ #
+ # web_idl.file_io.write_to_file_if_changed(filepath, format_result.contents)
+- web_idl.file_io.write_to_file_if_changed(filepath, rendered_text)
++ web_idl.file_io.write_to_file_if_changed(filepath, rendered_text.encode('utf-8'))
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/dictionary.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/dictionary.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/dictionary.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/dictionary.py 2022-03-08 18:23:26.576204010 +0100
+@@ -993,7 +993,7 @@ def make_dict_trace_func(cg_context):
+ _2 = _blink_member_name(member).value_var
+ return TextNode(_format(pattern, _1=_1, _2=_2))
+
+- body.extend(map(make_trace_member_node, own_members))
++ body.extend(list(map(make_trace_member_node, own_members)))
+ body.append(TextNode("BaseClass::Trace(visitor);"))
+
+ return func_decl, func_def
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/interface.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/interface.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/interface.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/interface.py 2022-03-08 18:23:26.579537347 +0100
+@@ -582,7 +582,7 @@ def _make_blink_api_call(code_node,
+ overriding_args=None):
+ assert isinstance(code_node, SymbolScopeNode)
+ assert isinstance(cg_context, CodeGenContext)
+- assert num_of_args is None or isinstance(num_of_args, (int, long))
++ assert num_of_args is None or isinstance(num_of_args, int)
+ assert (overriding_args is None
+ or (isinstance(overriding_args, (list, tuple))
+ and all(isinstance(arg, str) for arg in overriding_args)))
+@@ -1196,8 +1196,10 @@ def make_overload_dispatcher(cg_context)
+ did_use_break = did_use_break or can_fail
+
+ conditional = expr_or(
+- map(lambda item: expr_from_exposure(item.function_like.exposure),
+- items))
++ list(
++ map(
++ lambda item: expr_from_exposure(item.function_like.exposure
++ ), items)))
+ if not conditional.is_always_true:
+ node = CxxUnlikelyIfNode(cond=conditional, body=node)
+
+@@ -4642,7 +4644,7 @@ class _PropEntryConstructorGroup(_PropEn
+ def __init__(self, is_context_dependent, exposure_conditional, world,
+ constructor_group, ctor_callback_name, ctor_func_length):
+ assert isinstance(ctor_callback_name, str)
+- assert isinstance(ctor_func_length, (int, long))
++ assert isinstance(ctor_func_length, int)
+
+ _PropEntryBase.__init__(self, is_context_dependent,
+ exposure_conditional, world, constructor_group)
+@@ -4670,7 +4672,7 @@ class _PropEntryOperationGroup(_PropEntr
+ op_func_length,
+ no_alloc_direct_callback_name=None):
+ assert isinstance(op_callback_name, str)
+- assert isinstance(op_func_length, (int, long))
++ assert isinstance(op_func_length, int)
+
+ _PropEntryBase.__init__(self, is_context_dependent,
+ exposure_conditional, world, operation_group)
+@@ -5175,9 +5177,9 @@ def make_install_interface_template(cg_c
+ ])
+
+ if class_like.identifier == "CSSStyleDeclaration":
+- css_properties = filter(
+- lambda attr: "CSSProperty" in attr.extended_attributes,
+- class_like.attributes)
++ css_properties = list(
++ filter(lambda attr: "CSSProperty" in attr.extended_attributes,
++ class_like.attributes))
+ if css_properties:
+ prop_name_list = "".join(
+ map(lambda attr: "\"{}\", ".format(attr.identifier),
+@@ -5567,8 +5569,8 @@ ${instance_object} = ${v8_context}->Glob
+ "V8DOMConfiguration::InstallConstants(${isolate}, "
+ "${interface_template}, ${prototype_template}, "
+ "kConstantCallbackTable, base::size(kConstantCallbackTable));")
+- constant_callback_entries = filter(lambda entry: entry.const_callback_name,
+- constant_entries)
++ constant_callback_entries = list(filter(lambda entry: entry.const_callback_name,
++ constant_entries))
+ install_properties(table_name, constant_callback_entries,
+ _make_constant_callback_registration_table,
+ installer_call_text)
+@@ -5584,8 +5586,8 @@ ${instance_object} = ${v8_context}->Glob
+ "V8DOMConfiguration::InstallConstants(${isolate}, "
+ "${interface_template}, ${prototype_template}, "
+ "kConstantValueTable, base::size(kConstantValueTable));")
+- constant_value_entries = filter(
+- lambda entry: not entry.const_callback_name, constant_entries)
++ constant_value_entries = list(filter(
++ lambda entry: not entry.const_callback_name, constant_entries))
+ install_properties(table_name, constant_value_entries,
+ _make_constant_value_registration_table,
+ installer_call_text)
+@@ -6336,8 +6338,8 @@ def make_v8_context_snapshot_api(cg_cont
+ assert isinstance(component, web_idl.Component)
+
+ derived_interfaces = cg_context.interface.deriveds
+- derived_names = map(lambda interface: interface.identifier,
+- derived_interfaces)
++ derived_names = list(
++ map(lambda interface: interface.identifier, derived_interfaces))
+ derived_names.append(cg_context.interface.identifier)
+ if not ("Window" in derived_names or "HTMLDocument" in derived_names):
+ return None, None
+@@ -6411,9 +6413,11 @@ def _make_v8_context_snapshot_get_refere
+ collect_callbacks(named_properties_object_callback_defs)
+ collect_callbacks(cross_origin_property_callback_defs)
+
+- entry_nodes = map(
+- lambda name: TextNode("reinterpret_cast<intptr_t>({}),".format(name)),
+- filter(None, callback_names))
++ entry_nodes = list(
++ map(
++ lambda name: TextNode("reinterpret_cast<intptr_t>({}),".format(name
++ )),
++ filter(None, callback_names)))
+ table_node = ListNode([
+ TextNode("using namespace ${class_name}Callbacks;"),
+ TextNode("static const intptr_t kReferenceTable[] = {"),
+@@ -6451,10 +6455,11 @@ def _make_v8_context_snapshot_install_pr
+ class_name=None,
+ prop_install_mode=PropInstallMode.V8_CONTEXT_SNAPSHOT,
+ trampoline_var_name=None,
+- attribute_entries=filter(selector, attribute_entries),
+- constant_entries=filter(selector, constant_entries),
+- exposed_construct_entries=filter(selector, exposed_construct_entries),
+- operation_entries=filter(selector, operation_entries))
++ attribute_entries=list(filter(selector, attribute_entries)),
++ constant_entries=list(filter(selector, constant_entries)),
++ exposed_construct_entries=list(
++ filter(selector, exposed_construct_entries)),
++ operation_entries=list(filter(selector, operation_entries)))
+
+ return func_decl, func_def
+
+@@ -6810,11 +6815,11 @@ def generate_interface(interface_identif
+ class_name=impl_class_name,
+ prop_install_mode=PropInstallMode.UNCONDITIONAL,
+ trampoline_var_name=tp_install_unconditional_props,
+- attribute_entries=filter(is_unconditional, attribute_entries),
+- constant_entries=filter(is_unconditional, constant_entries),
+- exposed_construct_entries=filter(is_unconditional,
+- exposed_construct_entries),
+- operation_entries=filter(is_unconditional, operation_entries))
++ attribute_entries=list(filter(is_unconditional, attribute_entries)),
++ constant_entries=list(filter(is_unconditional, constant_entries)),
++ exposed_construct_entries=list(
++ filter(is_unconditional, exposed_construct_entries)),
++ operation_entries=list(filter(is_unconditional, operation_entries)))
+ (install_context_independent_props_decl,
+ install_context_independent_props_def,
+ install_context_independent_props_trampoline) = make_install_properties(
+@@ -6823,11 +6828,14 @@ def generate_interface(interface_identif
+ class_name=impl_class_name,
+ prop_install_mode=PropInstallMode.CONTEXT_INDEPENDENT,
+ trampoline_var_name=tp_install_context_independent_props,
+- attribute_entries=filter(is_context_independent, attribute_entries),
+- constant_entries=filter(is_context_independent, constant_entries),
+- exposed_construct_entries=filter(is_context_independent,
+- exposed_construct_entries),
+- operation_entries=filter(is_context_independent, operation_entries))
++ attribute_entries=list(
++ filter(is_context_independent, attribute_entries)),
++ constant_entries=list(filter(is_context_independent,
++ constant_entries)),
++ exposed_construct_entries=list(
++ filter(is_context_independent, exposed_construct_entries)),
++ operation_entries=list(
++ filter(is_context_independent, operation_entries)))
+ (install_context_dependent_props_decl, install_context_dependent_props_def,
+ install_context_dependent_props_trampoline) = make_install_properties(
+ cg_context,
+@@ -6835,11 +6843,13 @@ def generate_interface(interface_identif
+ class_name=impl_class_name,
+ prop_install_mode=PropInstallMode.CONTEXT_DEPENDENT,
+ trampoline_var_name=tp_install_context_dependent_props,
+- attribute_entries=filter(is_context_dependent, attribute_entries),
+- constant_entries=filter(is_context_dependent, constant_entries),
+- exposed_construct_entries=filter(is_context_dependent,
+- exposed_construct_entries),
+- operation_entries=filter(is_context_dependent, operation_entries))
++ attribute_entries=list(filter(is_context_dependent,
++ attribute_entries)),
++ constant_entries=list(filter(is_context_dependent, constant_entries)),
++ exposed_construct_entries=list(
++ filter(is_context_dependent, exposed_construct_entries)),
++ operation_entries=list(filter(is_context_dependent,
++ operation_entries)))
+ (install_interface_template_decl, install_interface_template_def,
+ install_interface_template_trampoline) = make_install_interface_template(
+ cg_context,
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/mako_renderer.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/mako_renderer.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/mako_renderer.py 2022-03-08 18:21:46.709422871 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/mako_renderer.py 2022-03-08 18:23:26.579537347 +0100
+@@ -105,7 +105,7 @@ class MakoRenderer(object):
+ on_error = self._caller_stack_on_error
+ if (len(current) <= len(on_error)
+ and all(current[i] == on_error[i]
+- for i in xrange(len(current)))):
++ for i in range(len(current)))):
+ pass # Error happened in a deeper caller.
+ else:
+ self._caller_stack_on_error = list(self._caller_stack)
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py 2022-03-08 18:23:26.579537347 +0100
+@@ -70,8 +70,13 @@ def gn_format(contents, filename=None):
+
+
+ def _invoke_format_command(command_line, filename, contents):
+- proc = subprocess.Popen(
+- command_line, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
++ kwargs = {}
++ if sys.version_info.major != 2:
++ kwargs['encoding'] = 'utf-8'
++ proc = subprocess.Popen(command_line,
++ stdin=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ **kwargs)
+ stdout_output, stderr_output = proc.communicate(input=contents)
+ exit_code = proc.wait()
+
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py 2022-03-08 18:23:26.579537347 +0100
+@@ -2,6 +2,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
++import functools
+ import multiprocessing
+
+ from .package_initializer import package_initializer
+@@ -76,7 +77,7 @@ class TaskQueue(object):
+ if not report_progress:
+ return
+
+- done_count = reduce(
++ done_count = functools.reduce(
+ lambda count, worker_task: count + bool(worker_task.ready()),
+ self._worker_tasks, 0)
+ report_progress(len(self._worker_tasks), done_count)
+@@ -85,4 +86,4 @@ class TaskQueue(object):
+ def _task_queue_run_tasks(tasks):
+ for task in tasks:
+ func, args, kwargs = task
+- apply(func, args, kwargs)
++ func(*args, **kwargs)
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator.py 2022-03-08 18:23:26.579537347 +0100
+@@ -13,6 +13,7 @@ import re
+ import sys
+
+ from idl_types import set_ancestors, IdlType
++from itertools import groupby
+ from v8_globals import includes
+ from v8_interface import constant_filters
+ from v8_types import set_component_dirs
+@@ -43,6 +44,7 @@ TEMPLATES_DIR = os.path.normpath(
+ # after path[0] == invoking script dir
+ sys.path.insert(1, THIRD_PARTY_DIR)
+ import jinja2
++from jinja2.filters import make_attrgetter, environmentfilter
+
+
+ def generate_indented_conditional(code, conditional):
+@@ -88,6 +90,13 @@ def runtime_enabled_if(code, name):
+ return generate_indented_conditional(code, function)
+
+
++@environmentfilter
++def do_stringify_key_group_by(environment, value, attribute):
++ expr = make_attrgetter(environment, attribute)
++ key = lambda item: '' if expr(item) is None else str(expr(item))
++ return groupby(sorted(value, key=key), expr)
++
++
+ def initialize_jinja_env(cache_dir):
+ jinja_env = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(TEMPLATES_DIR),
+@@ -117,6 +126,7 @@ def initialize_jinja_env(cache_dir):
+ })
+ jinja_env.filters.update(constant_filters())
+ jinja_env.filters.update(method_filters())
++ jinja_env.filters["stringifykeygroupby"] = do_stringify_key_group_by
+ return jinja_env
+
+
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_origin_trial_features.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_origin_trial_features.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_origin_trial_features.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_origin_trial_features.py 2022-03-08 18:23:26.579537347 +0100
+@@ -80,7 +80,7 @@ def read_idl_file(reader, idl_filename):
+ assert len(interfaces) == 1, (
+ "Expected one interface in file %r, found %d" %
+ (idl_filename, len(interfaces)))
+- return (interfaces.values()[0], includes)
++ return (list(interfaces.values())[0], includes)
+
+
+ def interface_is_global(interface):
+@@ -281,7 +281,7 @@ def main():
+
+ info_provider = create_component_info_provider(
+ os.path.normpath(options.info_dir), options.target_component)
+- idl_filenames = map(str.strip, open(options.idl_files_list))
++ idl_filenames = list(map(str.strip, open(options.idl_files_list)))
+
+ generate_origin_trial_features(info_provider, options, idl_filenames)
+ return 0
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_definitions.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_definitions.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_definitions.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_definitions.py 2022-03-08 18:23:26.582870685 +0100
+@@ -394,7 +394,8 @@ class IdlInterface(object):
+ else:
+ raise ValueError('Unrecognized node class: %s' % child_class)
+
+- if len(filter(None, [self.iterable, self.maplike, self.setlike])) > 1:
++ if len(list(filter(None,
++ [self.iterable, self.maplike, self.setlike]))) > 1:
+ raise ValueError(
+ 'Interface can only have one of iterable<>, maplike<> and setlike<>.'
+ )
+@@ -512,6 +513,9 @@ class IdlAttribute(TypedObject):
+ def accept(self, visitor):
+ visitor.visit_attribute(self)
+
++ def __lt__(self, other):
++ return self.name < other.name
++
+
+ ################################################################################
+ # Constants
+@@ -852,7 +856,7 @@ class IdlIncludes(object):
+ ################################################################################
+
+
+-class Exposure:
++class Exposure(object):
+ """An Exposure holds one Exposed or RuntimeEnabled condition.
+ Each exposure has two properties: exposed and runtime_enabled.
+ Exposure(e, r) corresponds to [Exposed(e r)]. Exposure(e) corresponds to
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_reader.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_reader.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_reader.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_reader.py 2022-03-08 18:23:26.582870685 +0100
+@@ -55,8 +55,8 @@ def validate_blink_idl_definitions(idl_f
+ definitions. There is no filename convention in this case.
+ - Otherwise, an IDL file is invalid.
+ """
+- targets = (
+- definitions.interfaces.values() + definitions.dictionaries.values())
++ targets = (list(definitions.interfaces.values()) +
++ list(definitions.dictionaries.values()))
+ number_of_targets = len(targets)
+ if number_of_targets > 1:
+ raise Exception(
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_types.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_types.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_types.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_types.py 2022-03-08 18:23:26.582870685 +0100
+@@ -349,7 +349,7 @@ class IdlUnionType(IdlTypeBase):
+ return True
+
+ def single_matching_member_type(self, predicate):
+- matching_types = filter(predicate, self.flattened_member_types)
++ matching_types = list(filter(predicate, self.flattened_member_types))
+ if len(matching_types) > 1:
+ raise ValueError('%s is ambiguous.' % self.name)
+ return matching_types[0] if matching_types else None
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/utilities.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/utilities.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/utilities.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/utilities.py 2022-03-08 18:23:26.582870685 +0100
+@@ -196,8 +196,9 @@ class ComponentInfoProviderModules(Compo
+
+ @property
+ def callback_functions(self):
+- return dict(self._component_info_core['callback_functions'].items() +
+- self._component_info_modules['callback_functions'].items())
++ return dict(
++ list(self._component_info_core['callback_functions'].items()) +
++ list(self._component_info_modules['callback_functions'].items()))
+
+ @property
+ def specifier_for_export(self):
+@@ -209,8 +210,8 @@ class ComponentInfoProviderModules(Compo
+
+
+ def load_interfaces_info_overall_pickle(info_dir):
+- with open(os.path.join(info_dir,
+- 'interfaces_info.pickle')) as interface_info_file:
++ with open(os.path.join(info_dir, 'interfaces_info.pickle'),
++ mode='rb') as interface_info_file:
+ return pickle.load(interface_info_file)
+
+
+@@ -236,23 +237,20 @@ def merge_dict_recursively(target, diff)
+
+ def create_component_info_provider_core(info_dir):
+ interfaces_info = load_interfaces_info_overall_pickle(info_dir)
+- with open(
+- os.path.join(info_dir, 'core',
+- 'component_info_core.pickle')) as component_info_file:
++ with open(os.path.join(info_dir, 'core', 'component_info_core.pickle'),
++ mode='rb') as component_info_file:
+ component_info = pickle.load(component_info_file)
+ return ComponentInfoProviderCore(interfaces_info, component_info)
+
+
+ def create_component_info_provider_modules(info_dir):
+ interfaces_info = load_interfaces_info_overall_pickle(info_dir)
+- with open(
+- os.path.join(info_dir, 'core',
+- 'component_info_core.pickle')) as component_info_file:
++ with open(os.path.join(info_dir, 'core', 'component_info_core.pickle'),
++ mode='rb') as component_info_file:
+ component_info_core = pickle.load(component_info_file)
+- with open(
+- os.path.join(
+- info_dir, 'modules',
+- 'component_info_modules.pickle')) as component_info_file:
++ with open(os.path.join(info_dir, 'modules',
++ 'component_info_modules.pickle'),
++ mode='rb') as component_info_file:
+ component_info_modules = pickle.load(component_info_file)
+ return ComponentInfoProviderModules(interfaces_info, component_info_core,
+ component_info_modules)
+@@ -356,7 +354,7 @@ def write_pickle_file(pickle_filename, d
+ pickle_filename = abs(pickle_filename)
+ # If |data| is same with the file content, we skip updating.
+ if os.path.isfile(pickle_filename):
+- with open(pickle_filename) as pickle_file:
++ with open(pickle_filename, 'rb') as pickle_file:
+ try:
+ if pickle.load(pickle_file) == data:
+ return
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_interface.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_interface.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_interface.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_interface.py 2022-03-08 18:23:26.582870685 +0100
+@@ -189,7 +189,7 @@ def context_enabled_features(attributes)
+ return sorted([
+ member for member in members
+ if member.get(KEY) and not member.get('exposed_test')
+- ])
++ ], key=lambda item: item['name'])
+
+ def member_filter_by_name(members, name):
+ return [member for member in members if member[KEY] == name]
+@@ -612,7 +612,8 @@ def interface_context(interface, interfa
+ sorted(
+ origin_trial_features(interface, context['constants'],
+ context['attributes'], context['methods']) +
+- context_enabled_features(context['attributes'])),
++ context_enabled_features(context['attributes']),
++ key=lambda item: item['name']),
+ })
+ if context['optional_features']:
+ includes.add('platform/bindings/v8_per_context_data.h')
+@@ -1356,9 +1357,9 @@ def resolution_tests_methods(effective_o
+
+ # Extract argument and IDL type to simplify accessing these in each loop.
+ arguments = [method['arguments'][index] for method in methods]
+- arguments_methods = zip(arguments, methods)
++ arguments_methods = list(zip(arguments, methods))
+ idl_types = [argument['idl_type_object'] for argument in arguments]
+- idl_types_methods = zip(idl_types, methods)
++ idl_types_methods = list(zip(idl_types, methods))
+
+ # We can’t do a single loop through all methods or simply sort them, because
+ # a method may be listed in multiple steps of the resolution algorithm, and
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_methods.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_methods.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_methods.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_methods.py 2022-03-08 18:23:26.582870685 +0100
+@@ -46,6 +46,10 @@ import v8_types
+ import v8_utilities
+ from v8_utilities import (has_extended_attribute_value, is_unforgeable)
+
++# TODO: Remove this once Python2 is obsoleted.
++if sys.version_info.major != 2:
++ basestring = str
++
+
+ def method_is_visible(method, interface_is_partial):
+ if 'overloads' in method:
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_utilities.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_utilities.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_utilities.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_utilities.py 2022-03-08 18:23:26.582870685 +0100
+@@ -271,7 +271,7 @@ EXPOSED_WORKERS = set([
+ ])
+
+
+-class ExposureSet:
++class ExposureSet(object):
+ """An ExposureSet is a collection of Exposure instructions."""
+
+ def __init__(self, exposures=None):
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/callback_interface.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/callback_interface.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/callback_interface.py 2022-03-08 18:21:46.712756208 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/callback_interface.py 2022-03-08 18:23:26.582870685 +0100
+@@ -91,11 +91,13 @@ class CallbackInterface(UserDefinedType,
+ for operation_ir in ir.operations
+ ])
+ self._operation_groups = tuple([
+- OperationGroup(
+- operation_group_ir,
+- filter(lambda x: x.identifier == operation_group_ir.identifier,
+- self._operations),
+- owner=self) for operation_group_ir in ir.operation_groups
++ OperationGroup(operation_group_ir,
++ list(
++ filter(
++ lambda x: x.identifier == operation_group_ir
++ .identifier, self._operations)),
++ owner=self)
++ for operation_group_ir in ir.operation_groups
+ ])
+
+ @property
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/database.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/database.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/database.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/database.py 2022-03-08 18:23:26.582870685 +0100
+@@ -156,4 +156,4 @@ class Database(object):
+ return self._view_by_kind(Database._Kind.UNION)
+
+ def _view_by_kind(self, kind):
+- return self._impl.find_by_kind(kind).values()
++ return list(self._impl.find_by_kind(kind).values())
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/exposure.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/exposure.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/exposure.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/exposure.py 2022-03-08 18:23:26.582870685 +0100
+@@ -8,8 +8,11 @@ from .runtime_enabled_features import Ru
+ class _Feature(str):
+ """Represents a runtime-enabled feature."""
+
++ def __new__(cls, value):
++ return str.__new__(cls, value)
++
+ def __init__(self, value):
+- str.__init__(self, value)
++ str.__init__(self)
+ self._is_context_dependent = (
+ RuntimeEnabledFeatures.is_context_dependent(self))
+
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/function_like.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/function_like.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/function_like.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/function_like.py 2022-03-08 18:23:26.586204022 +0100
+@@ -71,8 +71,9 @@ class FunctionLike(WithIdentifier):
+ def num_of_required_arguments(self):
+ """Returns the number of required arguments."""
+ return len(
+- filter(lambda arg: not (arg.is_optional or arg.is_variadic),
+- self.arguments))
++ list(
++ filter(lambda arg: not (arg.is_optional or arg.is_variadic),
++ self.arguments)))
+
+
+ class OverloadGroup(WithIdentifier):
+@@ -171,8 +172,7 @@ class OverloadGroup(WithIdentifier):
+ Returns the effective overload set.
+ https://heycam.github.io/webidl/#compute-the-effective-overload-set
+ """
+- assert argument_count is None or isinstance(argument_count,
+- (int, long))
++ assert argument_count is None or isinstance(argument_count, int)
+
+ N = argument_count
+ S = []
+@@ -188,21 +188,21 @@ class OverloadGroup(WithIdentifier):
+
+ S.append(
+ OverloadGroup.EffectiveOverloadItem(
+- X, map(lambda arg: arg.idl_type, X.arguments),
+- map(lambda arg: arg.optionality, X.arguments)))
++ X, list(map(lambda arg: arg.idl_type, X.arguments)),
++ list(map(lambda arg: arg.optionality, X.arguments))))
+
+ if X.is_variadic:
+- for i in xrange(n, max(maxarg, N)):
+- t = map(lambda arg: arg.idl_type, X.arguments)
+- o = map(lambda arg: arg.optionality, X.arguments)
+- for _ in xrange(n, i + 1):
++ for i in range(n, max(maxarg, N)):
++ t = list(map(lambda arg: arg.idl_type, X.arguments))
++ o = list(map(lambda arg: arg.optionality, X.arguments))
++ for _ in range(n, i + 1):
+ t.append(X.arguments[-1].idl_type)
+ o.append(X.arguments[-1].optionality)
+ S.append(OverloadGroup.EffectiveOverloadItem(X, t, o))
+
+- t = map(lambda arg: arg.idl_type, X.arguments)
+- o = map(lambda arg: arg.optionality, X.arguments)
+- for i in xrange(n - 1, -1, -1):
++ t = list(map(lambda arg: arg.idl_type, X.arguments))
++ o = list(map(lambda arg: arg.optionality, X.arguments))
++ for i in range(n - 1, -1, -1):
+ if X.arguments[i].optionality == IdlType.Optionality.REQUIRED:
+ break
+ S.append(OverloadGroup.EffectiveOverloadItem(X, t[:i], o[:i]))
+@@ -222,7 +222,7 @@ class OverloadGroup(WithIdentifier):
+ for item in items)
+ assert len(items) > 1
+
+- for index in xrange(len(items[0].type_list)):
++ for index in range(len(items[0].type_list)):
+ # Assume that the given items are valid, and we only need to test
+ # the two types.
+ if OverloadGroup.are_distinguishable_types(
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_compiler.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_compiler.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_compiler.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_compiler.py 2022-03-08 18:23:26.586204022 +0100
+@@ -149,8 +149,8 @@ class IdlCompiler(object):
+ for old_ir in old_irs:
+ new_ir = make_copy(old_ir)
+ self._ir_map.add(new_ir)
+- new_ir.attributes = filter(not_disabled, new_ir.attributes)
+- new_ir.operations = filter(not_disabled, new_ir.operations)
++ new_ir.attributes = list(filter(not_disabled, new_ir.attributes))
++ new_ir.operations = list(filter(not_disabled, new_ir.operations))
+
+ def _record_defined_in_partial_and_mixin(self):
+ old_irs = self._ir_map.irs_of_kinds(
+@@ -231,7 +231,7 @@ class IdlCompiler(object):
+ only_to_members_of_partial_or_mixin=False)
+ propagate_to_exposure(propagate)
+
+- map(process_member_like, ir.iter_all_members())
++ list(map(process_member_like, ir.iter_all_members()))
+
+ def process_member_like(ir):
+ propagate = functools.partial(propagate_extattr, ir=ir)
+@@ -257,7 +257,7 @@ class IdlCompiler(object):
+
+ self._ir_map.move_to_new_phase()
+
+- map(process_interface_like, old_irs)
++ list(map(process_interface_like, old_irs))
+
+ def _determine_blink_headers(self):
+ irs = self._ir_map.irs_of_kinds(
+@@ -422,9 +422,9 @@ class IdlCompiler(object):
+ assert not new_interface.deriveds
+ derived_set = identifier_to_derived_set.get(
+ new_interface.identifier, set())
+- new_interface.deriveds = map(
+- lambda id_: self._ref_to_idl_def_factory.create(id_),
+- sorted(derived_set))
++ new_interface.deriveds = list(
++ map(lambda id_: self._ref_to_idl_def_factory.create(id_),
++ sorted(derived_set)))
+
+ def _supplement_missing_html_constructor_operation(self):
+ # Temporary mitigation of misuse of [HTMLConstructor]
+@@ -553,7 +553,8 @@ class IdlCompiler(object):
+ self._ir_map.add(new_ir)
+
+ for group in new_ir.iter_all_overload_groups():
+- exposures = map(lambda overload: overload.exposure, group)
++ exposures = list(map(lambda overload: overload.exposure,
++ group))
+
+ # [Exposed]
+ if any(not exposure.global_names_and_features
+@@ -653,8 +654,8 @@ class IdlCompiler(object):
+ constructs = set()
+ for global_name in global_names:
+ constructs.update(exposed_map.get(global_name, []))
+- new_ir.exposed_constructs = map(
+- self._ref_to_idl_def_factory.create, sorted(constructs))
++ new_ir.exposed_constructs = list(
++ map(self._ref_to_idl_def_factory.create, sorted(constructs)))
+
+ assert not new_ir.legacy_window_aliases
+ if new_ir.identifier != 'Window':
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/interface.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/interface.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/interface.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/interface.py 2022-03-08 18:23:26.586204022 +0100
+@@ -180,8 +180,9 @@ class Interface(UserDefinedType, WithExt
+ self._constructor_groups = tuple([
+ ConstructorGroup(
+ group_ir,
+- filter(lambda x: x.identifier == group_ir.identifier,
+- self._constructors),
++ list(
++ filter(lambda x: x.identifier == group_ir.identifier,
++ self._constructors)),
+ owner=self) for group_ir in ir.constructor_groups
+ ])
+ assert len(self._constructor_groups) <= 1
+@@ -192,8 +193,9 @@ class Interface(UserDefinedType, WithExt
+ self._named_constructor_groups = tuple([
+ ConstructorGroup(
+ group_ir,
+- filter(lambda x: x.identifier == group_ir.identifier,
+- self._named_constructors),
++ list(
++ filter(lambda x: x.identifier == group_ir.identifier,
++ self._named_constructors)),
+ owner=self) for group_ir in ir.named_constructor_groups
+ ])
+ self._operations = tuple([
+@@ -203,22 +205,23 @@ class Interface(UserDefinedType, WithExt
+ self._operation_groups = tuple([
+ OperationGroup(
+ group_ir,
+- filter(lambda x: x.identifier == group_ir.identifier,
+- self._operations),
++ list(
++ filter(lambda x: x.identifier == group_ir.identifier,
++ self._operations)),
+ owner=self) for group_ir in ir.operation_groups
+ ])
+ self._exposed_constructs = tuple(ir.exposed_constructs)
+ self._legacy_window_aliases = tuple(ir.legacy_window_aliases)
+ self._indexed_and_named_properties = None
+- indexed_and_named_property_operations = filter(
+- lambda x: x.is_indexed_or_named_property_operation,
+- self._operations)
++ indexed_and_named_property_operations = list(
++ filter(lambda x: x.is_indexed_or_named_property_operation,
++ self._operations))
+ if indexed_and_named_property_operations:
+ self._indexed_and_named_properties = IndexedAndNamedProperties(
+ indexed_and_named_property_operations, owner=self)
+ self._stringifier = None
+- stringifier_operation_irs = filter(lambda x: x.is_stringifier,
+- ir.operations)
++ stringifier_operation_irs = list(
++ filter(lambda x: x.is_stringifier, ir.operations))
+ if stringifier_operation_irs:
+ assert len(stringifier_operation_irs) == 1
+ op_ir = make_copy(stringifier_operation_irs[0])
+@@ -231,8 +234,9 @@ class Interface(UserDefinedType, WithExt
+ attribute = None
+ if operation.stringifier_attribute:
+ attr_id = operation.stringifier_attribute
+- attributes = filter(lambda x: x.identifier == attr_id,
+- self._attributes)
++ attributes = list(
++ filter(lambda x: x.identifier == attr_id,
++ self._attributes))
+ assert len(attributes) == 1
+ attribute = attributes[0]
+ self._stringifier = Stringifier(operation, attribute, owner=self)
+@@ -578,8 +582,9 @@ class Iterable(WithDebugInfo):
+ self._operation_groups = tuple([
+ OperationGroup(
+ group_ir,
+- filter(lambda x: x.identifier == group_ir.identifier,
+- self._operations),
++ list(
++ filter(lambda x: x.identifier == group_ir.identifier,
++ self._operations)),
+ owner=owner) for group_ir in ir.operation_groups
+ ])
+
+@@ -666,8 +671,9 @@ class Maplike(WithDebugInfo):
+ self._operation_groups = tuple([
+ OperationGroup(
+ group_ir,
+- filter(lambda x: x.identifier == group_ir.identifier,
+- self._operations),
++ list(
++ filter(lambda x: x.identifier == group_ir.identifier,
++ self._operations)),
+ owner=owner) for group_ir in ir.operation_groups
+ ])
+
+@@ -755,8 +761,9 @@ class Setlike(WithDebugInfo):
+ self._operation_groups = tuple([
+ OperationGroup(
+ group_ir,
+- filter(lambda x: x.identifier == group_ir.identifier,
+- self._operations),
++ list(
++ filter(lambda x: x.identifier == group_ir.identifier,
++ self._operations)),
+ owner=owner) for group_ir in ir.operation_groups
+ ])
+
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_builder.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_builder.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_builder.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_builder.py 2022-03-08 18:23:26.586204022 +0100
+@@ -2,6 +2,8 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
++import sys
++
+ from .argument import Argument
+ from .ast_group import AstGroup
+ from .attribute import Attribute
+@@ -30,6 +32,11 @@ from .operation import Operation
+ from .typedef import Typedef
+
+
++# TODO: Remove this once Python2 is obsoleted.
++if sys.version_info.major != 2:
++ long = int
++
++
+ def load_and_register_idl_definitions(filepaths, register_ir,
+ create_ref_to_idl_def, idl_type_factory):
+ """
+@@ -160,7 +167,7 @@ class _IRBuilder(object):
+ child_nodes = list(node.GetChildren())
+ extended_attributes = self._take_extended_attributes(child_nodes)
+
+- members = map(self._build_interface_member, child_nodes)
++ members = list(map(self._build_interface_member, child_nodes))
+ attributes = []
+ constants = []
+ operations = []
+@@ -302,7 +309,7 @@ class _IRBuilder(object):
+ child_nodes = list(node.GetChildren())
+ inherited = self._take_inheritance(child_nodes)
+ extended_attributes = self._take_extended_attributes(child_nodes)
+- own_members = map(self._build_dictionary_member, child_nodes)
++ own_members = list(map(self._build_dictionary_member, child_nodes))
+
+ return Dictionary.IR(
+ identifier=Identifier(node.GetName()),
+@@ -336,7 +343,7 @@ class _IRBuilder(object):
+
+ child_nodes = list(node.GetChildren())
+ extended_attributes = self._take_extended_attributes(child_nodes)
+- members = map(self._build_interface_member, child_nodes)
++ members = list(map(self._build_interface_member, child_nodes))
+ constants = []
+ operations = []
+ for member in members:
+@@ -456,8 +463,8 @@ class _IRBuilder(object):
+ assert len(child_nodes) == 1
+ child = child_nodes[0]
+ if child.GetClass() == 'Arguments':
+- arguments = map(build_extattr_argument,
+- child.GetChildren())
++ arguments = list(
++ map(build_extattr_argument, child.GetChildren()))
+ elif child.GetClass() == 'Call':
+ assert len(child.GetChildren()) == 1
+ grand_child = child.GetChildren()[0]
+@@ -486,7 +493,9 @@ class _IRBuilder(object):
+
+ assert node.GetClass() == 'ExtAttributes'
+ return ExtendedAttributes(
+- filter(None, map(build_extended_attribute, node.GetChildren())))
++ list(
++ filter(None, map(build_extended_attribute,
++ node.GetChildren()))))
+
+ def _build_inheritance(self, node):
+ assert node.GetClass() == 'Inherit'
+@@ -506,7 +515,7 @@ class _IRBuilder(object):
+
+ def _build_iterable(self, node):
+ assert node.GetClass() == 'Iterable'
+- types = map(self._build_type, node.GetChildren())
++ types = list(map(self._build_type, node.GetChildren()))
+ assert len(types) == 1 or len(types) == 2
+ if len(types) == 1: # value iterator
+ key_type, value_type = (None, types[0])
+@@ -584,7 +593,7 @@ class _IRBuilder(object):
+ def _build_maplike(self, node, interface_identifier):
+ assert node.GetClass() == 'Maplike'
+ assert isinstance(interface_identifier, Identifier)
+- types = map(self._build_type, node.GetChildren())
++ types = list(map(self._build_type, node.GetChildren()))
+ assert len(types) == 2
+ key_type, value_type = types
+ is_readonly = bool(node.GetProperty('READONLY'))
+@@ -676,7 +685,7 @@ class _IRBuilder(object):
+ def _build_setlike(self, node, interface_identifier):
+ assert node.GetClass() == 'Setlike'
+ assert isinstance(interface_identifier, Identifier)
+- types = map(self._build_type, node.GetChildren())
++ types = list(map(self._build_type, node.GetChildren()))
+ assert len(types) == 1
+ value_type = types[0]
+ is_readonly = bool(node.GetProperty('READONLY'))
+@@ -838,7 +847,7 @@ class _IRBuilder(object):
+
+ def build_union_type(node, extended_attributes):
+ return self._idl_type_factory.union_type(
+- member_types=map(self._build_type, node.GetChildren()),
++ member_types=list(map(self._build_type, node.GetChildren())),
+ is_optional=is_optional,
+ extended_attributes=extended_attributes,
+ debug_info=self._build_debug_info(node))
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/make_copy.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/make_copy.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/make_copy.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/make_copy.py 2022-03-08 18:23:26.586204022 +0100
+@@ -3,6 +3,13 @@
+ # found in the LICENSE file.
+
+
++import sys
++
++# TODO: Remove this once Python2 is obsoleted.
++if sys.version_info.major != 2:
++ long = int
++ basestring = str
++
+ def make_copy(obj, memo=None):
+ """
+ Creates a copy of the given object, which should be an IR or part of IR.
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/namespace.py a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/namespace.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/namespace.py 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/namespace.py 2022-03-08 18:23:26.586204022 +0100
+@@ -107,11 +107,13 @@ class Namespace(UserDefinedType, WithExt
+ for operation_ir in ir.operations
+ ])
+ self._operation_groups = tuple([
+- OperationGroup(
+- operation_group_ir,
+- filter(lambda x: x.identifier == operation_group_ir.identifier,
+- self._operations),
+- owner=self) for operation_group_ir in ir.operation_groups
++ OperationGroup(operation_group_ir,
++ list(
++ filter(
++ lambda x: x.identifier == operation_group_ir
++ .identifier, self._operations)),
++ owner=self)
++ for operation_group_ir in ir.operation_groups
+ ])
+
+ @property
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/bindings/templates/dictionary_v8.cc.tmpl a/src/3rdparty/chromium/third_party/blink/renderer/bindings/templates/dictionary_v8.cc.tmpl
+--- b/src/3rdparty/chromium/third_party/blink/renderer/bindings/templates/dictionary_v8.cc.tmpl 2022-03-08 18:21:46.716089544 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/bindings/templates/dictionary_v8.cc.tmpl 2022-03-08 18:23:26.586204022 +0100
+@@ -59,9 +59,9 @@ void {{v8_class}}::ToImpl(v8::Isolate* i
+ DCHECK(executionContext);
+ {% endif %}{# has_origin_trial_members #}
+ {% endif %}{# members #}
+- {% for origin_trial_test, origin_trial_member_list in members | groupby('origin_trial_feature_name') %}
++ {% for origin_trial_test, origin_trial_member_list in members | stringifykeygroupby('origin_trial_feature_name') %}
+ {% filter origin_trial_enabled(origin_trial_test, "executionContext") %}
+- {% for feature_name, member_list in origin_trial_member_list | groupby('runtime_enabled_feature_name') %}
++ {% for feature_name, member_list in origin_trial_member_list | stringifykeygroupby('runtime_enabled_feature_name') %}
+ {% filter runtime_enabled(feature_name) %}
+ {% for member in member_list %}
+ v8::Local<v8::Value> {{member.v8_value}};
+@@ -147,9 +147,9 @@ bool toV8{{cpp_class}}(const {{cpp_class
+ DCHECK(executionContext);
+ {% endif %}{# has_origin_trial_members #}
+ {% endif %}{# members #}
+- {% for origin_trial_test, origin_trial_member_list in members | groupby('origin_trial_feature_name') %}
++ {% for origin_trial_test, origin_trial_member_list in members | stringifykeygroupby('origin_trial_feature_name') %}
+ {% filter origin_trial_enabled(origin_trial_test, "executionContext") %}
+- {% for feature_name, member_list in origin_trial_member_list | groupby('runtime_enabled_feature_name') %}
++ {% for feature_name, member_list in origin_trial_member_list | stringifykeygroupby('runtime_enabled_feature_name') %}
+ {% filter runtime_enabled(feature_name) %}
+ {% for member in member_list %}
+ v8::Local<v8::Value> {{member.v8_value}};
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_style_shorthands.py a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_style_shorthands.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_style_shorthands.py 2022-03-08 18:21:46.719422881 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_style_shorthands.py 2022-03-08 18:23:26.586204022 +0100
+@@ -71,7 +71,7 @@ class Expansion(object):
+ def enabled_longhands(self):
+ include = lambda longhand: not longhand[
+ 'runtime_flag'] or self.is_enabled(longhand['runtime_flag'])
+- return filter(include, self._longhands)
++ return list(filter(include, self._longhands))
+
+ @property
+ def index(self):
+@@ -87,8 +87,9 @@ class Expansion(object):
+
+ def create_expansions(longhands):
+ flags = collect_runtime_flags(longhands)
+- expansions = map(lambda mask: Expansion(longhands, flags, mask),
+- range(1 << len(flags)))
++ expansions = list(
++ map(lambda mask: Expansion(longhands, flags, mask),
++ range(1 << len(flags))))
+ assert len(expansions) > 0
+ # We generate 2^N expansions for N flags, so enforce some limit.
+ assert len(flags) <= 4, 'Too many runtime flags for a single shorthand'
+@@ -114,14 +115,14 @@ class StylePropertyShorthandWriter(json5
+
+ self._longhand_dictionary = defaultdict(list)
+ for property_ in json5_properties.shorthands:
+- property_['longhand_enum_keys'] = map(enum_key_for_css_property,
+- property_['longhands'])
+- property_['longhand_property_ids'] = map(id_for_css_property,
+- property_['longhands'])
+-
+- longhands = map(
+- lambda name: json5_properties.properties_by_name[name],
+- property_['longhands'])
++ property_['longhand_enum_keys'] = list(
++ map(enum_key_for_css_property, property_['longhands']))
++ property_['longhand_property_ids'] = list(
++ map(id_for_css_property, property_['longhands']))
++
++ longhands = list(
++ map(lambda name: json5_properties.properties_by_name[name],
++ property_['longhands']))
+ property_['expansions'] = create_expansions(longhands)
+ for longhand_enum_key in property_['longhand_enum_keys']:
+ self._longhand_dictionary[longhand_enum_key].append(property_)
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py 2022-03-08 18:21:46.719422881 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py 2022-03-08 18:23:26.586204022 +0100
+@@ -42,8 +42,8 @@ class CSSPropertyInstancesWriter(json5_g
+ aliases = self._css_properties.aliases
+
+ # Lists of PropertyClassData.
+- self._property_classes_by_id = map(self.get_class, properties)
+- self._alias_classes_by_id = map(self.get_class, aliases)
++ self._property_classes_by_id = list(map(self.get_class, properties))
++ self._alias_classes_by_id = list(map(self.get_class, aliases))
+
+ # Sort by enum value.
+ self._property_classes_by_id.sort(key=lambda t: t.enum_value)
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/gperf.py a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/gperf.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/gperf.py 2022-03-08 18:21:46.722756218 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/gperf.py 2022-03-08 18:23:26.586204022 +0100
+@@ -95,7 +95,7 @@ def main():
+
+ open(args.output_file, 'wb').write(
+ generate_gperf(gperf_path,
+- open(infile).read(), gperf_args))
++ open(infile).read(), gperf_args).encode('utf-8'))
+
+
+ if __name__ == '__main__':
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file.py a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file.py 2022-03-08 18:21:46.722756218 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file.py 2022-03-08 18:23:26.586204022 +0100
+@@ -66,7 +66,7 @@ class InFile(object):
+ self._defaults = defaults
+ self._valid_values = copy.deepcopy(
+ valid_values if valid_values else {})
+- self._parse(map(str.strip, lines))
++ self._parse(list(map(str.strip, lines)))
+
+ @classmethod
+ def load_from_files(self, file_paths, defaults, valid_values,
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_generator.py a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_generator.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_generator.py 2022-03-08 18:21:46.722756218 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_generator.py 2022-03-08 18:23:26.589537360 +0100
+@@ -32,10 +32,15 @@ import os
+ import os.path
+ import shlex
+ import shutil
++import sys
+ import optparse
+
+ from in_file import InFile
+
++# TODO: Remove this once Python2 is obsoleted.
++if sys.version_info.major != 2:
++ basestring = str
++
+
+ #########################################################
+ # This is now deprecated - use json5_generator.py instead
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features.py a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features.py
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features.py 2022-03-08 18:21:46.722756218 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features.py 2022-03-08 18:23:26.589537360 +0100
+@@ -138,7 +138,7 @@ class RuntimeFeatureWriter(BaseRuntimeFe
+ except Exception:
+ # If trouble unpickling, overwrite
+ pass
+- with open(os.path.abspath(file_name), 'w') as pickle_file:
++ with open(os.path.abspath(file_name), 'wb') as pickle_file:
+ pickle.dump(features_map, pickle_file)
+
+ def _template_inputs(self):
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_factory.cc.tmpl a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_factory.cc.tmpl
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_factory.cc.tmpl 2022-03-08 18:21:46.722756218 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_factory.cc.tmpl 2022-03-08 18:23:26.589537360 +0100
+@@ -26,7 +26,7 @@ using {{namespace}}FunctionMap = HashMap
+
+ static {{namespace}}FunctionMap* g_{{namespace|lower}}_constructors = nullptr;
+
+-{% for tag in tags|sort if not tag.noConstructor %}
++{% for tag in tags|sort(attribute='name') if not tag.noConstructor %}
+ static {{namespace}}Element* {{namespace}}{{tag.name.to_upper_camel_case()}}Constructor(
+ Document& document, const CreateElementFlags flags) {
+ {% if tag.runtimeEnabled %}
+@@ -52,7 +52,7 @@ static void Create{{namespace}}FunctionM
+ // Empty array initializer lists are illegal [dcl.init.aggr] and will not
+ // compile in MSVC. If tags list is empty, add check to skip this.
+ static const Create{{namespace}}FunctionMapData data[] = {
+- {% for tag in tags|sort if not tag.noConstructor %}
++ {% for tag in tags|sort(attribute='name') if not tag.noConstructor %}
+ { {{cpp_namespace}}::{{tag|symbol}}Tag, {{namespace}}{{tag.name.to_upper_camel_case()}}Constructor },
+ {% endfor %}
+ };
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.cc.tmpl a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.cc.tmpl
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.cc.tmpl 2022-03-08 18:21:46.726089554 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.cc.tmpl 2022-03-08 18:23:26.589537360 +0100
+@@ -22,7 +22,7 @@ HTMLTypeMap CreateHTMLTypeMap() {
+ const char* name;
+ HTMLElementType type;
+ } kTags[] = {
+- {% for tag in tags|sort %}
++ {% for tag in tags|sort(attribute='name') %}
+ { "{{tag.name}}", HTMLElementType::k{{tag.js_interface}} },
+ {% endfor %}
+ };
+@@ -42,7 +42,7 @@ HTMLElementType htmlElementTypeForTag(co
+ if (it == html_type_map.end())
+ return HTMLElementType::kHTMLUnknownElement;
+
+- {% for tag in tags|sort %}
++ {% for tag in tags|sort(attribute='name') %}
+ {% if tag.runtimeEnabled %}
+ if (tagName == "{{tag.name}}") {
+ if (!RuntimeEnabledFeatures::{{tag.runtimeEnabled}}Enabled(document->GetExecutionContext())) {
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.h.tmpl a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.h.tmpl
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.h.tmpl 2022-03-08 18:21:46.726089554 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/element_type_helpers.h.tmpl 2022-03-08 18:23:26.589537360 +0100
+@@ -15,7 +15,7 @@ namespace blink {
+ class Document;
+
+ // Type checking.
+-{% for tag in tags|sort if not tag.multipleTagNames and not tag.noTypeHelpers %}
++{% for tag in tags|sort(attribute='name') if not tag.multipleTagNames and not tag.noTypeHelpers %}
+ class {{tag.interface}};
+ template <>
+ inline bool IsElementOfType<const {{tag.interface}}>(const Node& node) {
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/macros.tmpl a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/macros.tmpl
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/macros.tmpl 2022-03-08 18:21:46.726089554 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/macros.tmpl 2022-03-08 18:23:26.589537360 +0100
+@@ -25,7 +25,7 @@
+
+
+ {% macro trie_leaf(index, object, return_macro, lowercase_data) %}
+-{% set name, value = object.items()[0] %}
++{% set name, value = (object.items()|list)[0] %}
+ {% if name|length %}
+ if (
+ {%- for c in name -%}
+@@ -45,7 +45,7 @@ return {{ return_macro(value) }};
+
+
+ {% macro trie_switch(trie, index, return_macro, lowercase_data) %}
+-{% if trie|length == 1 and trie.values()[0] is string %}
++{% if trie|length == 1 and (trie.values()|list)[0] is string %}
+ {{ trie_leaf(index, trie, return_macro, lowercase_data) -}}
+ {% else %}
+ {% if lowercase_data %}
+diff -upr b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/make_qualified_names.h.tmpl a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/make_qualified_names.h.tmpl
+--- b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/make_qualified_names.h.tmpl 2022-03-08 18:21:46.726089554 +0100
++++ a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/templates/make_qualified_names.h.tmpl 2022-03-08 18:23:26.589537360 +0100
+@@ -24,12 +24,12 @@ namespace {{cpp_namespace}} {
+ {{symbol_export}}extern const WTF::AtomicString& {{namespace_prefix}}NamespaceURI;
+
+ // Tags
+-{% for tag in tags|sort %}
++{% for tag in tags|sort(attribute='name') %}
+ {{symbol_export}}extern const blink::{{namespace}}QualifiedName& {{tag|symbol}}Tag;
+ {% endfor %}
+
+ // Attributes
+-{% for attr in attrs|sort %}
++{% for attr in attrs|sort(attribute='name') %}
+ {{symbol_export}}extern const blink::QualifiedName& {{attr|symbol}}Attr;
+ {% endfor %}
+
+diff -upr b/src/3rdparty/chromium/third_party/dawn/generator/generator_lib.py a/src/3rdparty/chromium/third_party/dawn/generator/generator_lib.py
+--- b/src/3rdparty/chromium/third_party/dawn/generator/generator_lib.py 2022-03-08 18:21:48.266091088 +0100
++++ a/src/3rdparty/chromium/third_party/dawn/generator/generator_lib.py 2022-03-08 18:23:26.589537360 +0100
+@@ -201,6 +201,10 @@ def _compute_python_dependencies(root_di
+
+ paths = set()
+ for path in module_paths:
++ # Builtin/namespaced modules may return None for the file path.
++ if not path:
++ continue
++
+ path = os.path.abspath(path)
+
+ if not path.startswith(root_dir):
+diff -upr b/src/3rdparty/chromium/third_party/devtools-frontend/src/BUILD.gn a/src/3rdparty/chromium/third_party/devtools-frontend/src/BUILD.gn
+--- b/src/3rdparty/chromium/third_party/devtools-frontend/src/BUILD.gn 2022-03-08 18:21:48.312757801 +0100
++++ a/src/3rdparty/chromium/third_party/devtools-frontend/src/BUILD.gn 2022-03-08 18:23:26.589537360 +0100
+@@ -2,6 +2,8 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
++import("//build/config/python.gni")
++
+ import("//third_party/blink/public/public_features.gni")
+ import("./all_devtools_files.gni")
+ import("./all_devtools_modules.gni")
+diff -upr b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_inspector_overlay.py a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_inspector_overlay.py
+--- b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_inspector_overlay.py 2022-03-08 18:21:50.282759764 +0100
++++ a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_inspector_overlay.py 2022-03-08 18:23:26.589537360 +0100
+@@ -45,7 +45,8 @@ def rollup(input_path, output_path, file
+ ['--format', 'iife', '-n', 'InspectorOverlay'] + ['--input', target] +
+ ['--plugin', rollup_plugin],
+ stdout=subprocess.PIPE,
+- stderr=subprocess.PIPE)
++ stderr=subprocess.PIPE,
++ text=True)
+ out, error = rollup_process.communicate()
+ if not out:
+ raise Exception("rollup failed: " + error)
+diff -upr b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_release_applications.py a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_release_applications.py
+--- b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_release_applications.py 2022-03-08 18:21:50.282759764 +0100
++++ a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_release_applications.py 2022-03-08 18:23:26.589537360 +0100
+@@ -10,7 +10,7 @@ Builds applications in release mode:
+ and the application loader into a single script.
+ """
+
+-from cStringIO import StringIO
++from io import StringIO
+ from os import path
+ from os.path import join
+ import copy
+@@ -145,8 +145,7 @@ class ReleaseBuilder(object):
+ resource_content = read_file(path.join(self.application_dir, resource_name))
+ if not (resource_name.endswith('.html')
+ or resource_name.endswith('md')):
+- resource_content += resource_source_url(resource_name).encode(
+- 'utf-8')
++ resource_content += resource_source_url(resource_name)
+ resource_content = resource_content.replace('\\', '\\\\')
+ resource_content = resource_content.replace('\n', '\\n')
+ resource_content = resource_content.replace('"', '\\"')
+@@ -173,7 +172,9 @@ class ReleaseBuilder(object):
+ def _concatenate_application_script(self, output):
+ output.write('Root.allDescriptors.push(...%s);' % self._release_module_descriptors())
+ if self.descriptors.extends:
+- output.write('Root.applicationDescriptor.modules.push(...%s);' % json.dumps(self.descriptors.application.values()))
++ output.write(
++ 'Root.applicationDescriptor.modules.push(...%s);' %
++ json.dumps(list(self.descriptors.application.values())))
+ else:
+ output.write('Root.applicationDescriptor = %s;' % self.descriptors.application_json())
+
+diff -upr b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_grd.py a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_grd.py
+--- b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_grd.py 2022-03-08 18:21:50.282759764 +0100
++++ a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_grd.py 2022-03-08 18:23:26.589537360 +0100
+@@ -123,7 +123,7 @@ def main(argv):
+
+ try:
+ os.makedirs(path.join(output_directory, 'Images'))
+- except OSError, e:
++ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+@@ -147,7 +147,7 @@ def main(argv):
+ shutil.copy(path.join(dirname, filename), path.join(output_directory, 'Images'))
+ add_file_to_grd(doc, path.join('Images', filename))
+
+- with open(parsed_args.output_filename, 'w') as output_file:
++ with open(parsed_args.output_filename, 'wb') as output_file:
+ output_file.write(doc.toxml(encoding='UTF-8'))
+
+
+diff -upr b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/modular_build.py a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/modular_build.py
+--- b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/modular_build.py 2022-03-08 18:21:50.282759764 +0100
++++ a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/modular_build.py 2022-03-08 18:23:26.589537360 +0100
+@@ -7,6 +7,8 @@
+ Utilities for the modular DevTools build.
+ """
+
++from __future__ import print_function
++
+ import collections
+ from os import path
+ import os
+@@ -40,7 +42,7 @@ def load_and_parse_json(filename):
+ try:
+ return json.loads(read_file(filename))
+ except:
+- print 'ERROR: Failed to parse %s' % filename
++ print('ERROR: Failed to parse %s' % filename)
+ raise
+
+ class Descriptors:
+@@ -57,7 +59,7 @@ class Descriptors:
+
+ def application_json(self):
+ result = dict()
+- result['modules'] = self.application.values()
++ result['modules'] = list(self.application.values())
+ return json.dumps(result)
+
+ def all_compiled_files(self):
+diff -upr b/src/3rdparty/chromium/third_party/jinja2/tests.py a/src/3rdparty/chromium/third_party/jinja2/tests.py
+--- b/src/3rdparty/chromium/third_party/jinja2/tests.py 2022-03-08 18:21:51.709427852 +0100
++++ a/src/3rdparty/chromium/third_party/jinja2/tests.py 2022-03-08 18:23:26.589537360 +0100
+@@ -10,7 +10,7 @@
+ """
+ import operator
+ import re
+-from collections import Mapping
++from collections.abc import Mapping
+ from jinja2.runtime import Undefined
+ from jinja2._compat import text_type, string_types, integer_types
+ import decimal
+diff -upr b/src/3rdparty/chromium/tools/metrics/ukm/gen_builders.py a/src/3rdparty/chromium/tools/metrics/ukm/gen_builders.py
+--- b/src/3rdparty/chromium/tools/metrics/ukm/gen_builders.py 2022-03-08 18:21:55.126097923 +0100
++++ a/src/3rdparty/chromium/tools/metrics/ukm/gen_builders.py 2022-03-08 18:23:26.589537360 +0100
+@@ -48,9 +48,10 @@ def ReadFilteredData(path):
+ data = ukm_model.UKM_XML_TYPE.Parse(ukm_file.read())
+ event_tag = ukm_model._EVENT_TYPE.tag
+ metric_tag = ukm_model._METRIC_TYPE.tag
+- data[event_tag] = filter(ukm_model.IsNotObsolete, data[event_tag])
++ data[event_tag] = list(filter(ukm_model.IsNotObsolete, data[event_tag]))
+ for event in data[event_tag]:
+- event[metric_tag] = filter(ukm_model.IsNotObsolete, event[metric_tag])
++ event[metric_tag] = list(
++ filter(ukm_model.IsNotObsolete, event[metric_tag]))
+ return data
+
+
+diff -upr b/src/3rdparty/chromium/ui/ozone/generate_constructor_list.py a/src/3rdparty/chromium/ui/ozone/generate_constructor_list.py
+--- b/src/3rdparty/chromium/ui/ozone/generate_constructor_list.py 2022-03-08 18:21:55.569431698 +0100
++++ a/src/3rdparty/chromium/ui/ozone/generate_constructor_list.py 2022-03-08 18:23:26.589537360 +0100
+@@ -45,12 +45,15 @@ Example Output: ./ui/ozone/generate_cons
+ } // namespace ui
+ """
+
++try:
++ from StringIO import StringIO # for Python 2
++except ImportError:
++ from io import StringIO # for Python 3
+ import optparse
+ import os
+ import collections
+ import re
+ import sys
+-import string
+
+
+ def GetTypedefName(typename):
+@@ -68,7 +71,7 @@ def GetConstructorName(typename, platfor
+ This is just "Create" + typename + platform.
+ """
+
+- return 'Create' + typename + string.capitalize(platform)
++ return 'Create' + typename + platform.capitalize()
+
+
+ def GenerateConstructorList(out, namespace, export, typenames, platforms,
+@@ -163,12 +166,14 @@ def main(argv):
+ sys.exit(1)
+
+ # Write to standard output or file specified by --output_cc.
+- out_cc = sys.stdout
++ out_cc = getattr(sys.stdout, 'buffer', sys.stdout)
+ if options.output_cc:
+ out_cc = open(options.output_cc, 'wb')
+
+- GenerateConstructorList(out_cc, options.namespace, options.export,
++ out_cc_str = StringIO()
++ GenerateConstructorList(out_cc_str, options.namespace, options.export,
+ typenames, platforms, includes, usings)
++ out_cc.write(out_cc_str.getvalue().encode('utf-8'))
+
+ if options.output_cc:
+ out_cc.close()
+diff -upr b/src/3rdparty/chromium/ui/ozone/generate_ozone_platform_list.py a/src/3rdparty/chromium/ui/ozone/generate_ozone_platform_list.py
+--- b/src/3rdparty/chromium/ui/ozone/generate_ozone_platform_list.py 2022-03-08 18:21:55.569431698 +0100
++++ a/src/3rdparty/chromium/ui/ozone/generate_ozone_platform_list.py 2022-03-08 18:23:26.592870697 +0100
+@@ -49,12 +49,15 @@ Example Output: ./generate_ozone_platfor
+
+ """
+
++try:
++ from StringIO import StringIO # for Python 2
++except ImportError:
++ from io import StringIO # for Python 3
+ import optparse
+ import os
+ import collections
+ import re
+ import sys
+-import string
+
+
+ def GetConstantName(name):
+@@ -63,7 +66,7 @@ def GetConstantName(name):
+ We just capitalize the platform name and prepend "CreateOzonePlatform".
+ """
+
+- return 'kPlatform' + string.capitalize(name)
++ return 'kPlatform' + name.capitalize()
+
+
+ def GeneratePlatformListText(out, platforms):
+@@ -149,9 +152,9 @@ def main(argv):
+ platforms.insert(0, options.default)
+
+ # Write to standard output or file specified by --output_{cc,h}.
+- out_cc = sys.stdout
+- out_h = sys.stdout
+- out_txt = sys.stdout
++ out_cc = getattr(sys.stdout, 'buffer', sys.stdout)
++ out_h = getattr(sys.stdout, 'buffer', sys.stdout)
++ out_txt = getattr(sys.stdout, 'buffer', sys.stdout)
+ if options.output_cc:
+ out_cc = open(options.output_cc, 'wb')
+ if options.output_h:
+@@ -159,9 +162,16 @@ def main(argv):
+ if options.output_txt:
+ out_txt = open(options.output_txt, 'wb')
+
+- GeneratePlatformListText(out_txt, platforms)
+- GeneratePlatformListHeader(out_h, platforms)
+- GeneratePlatformListSource(out_cc, platforms)
++ out_txt_str = StringIO()
++ out_h_str = StringIO()
++ out_cc_str = StringIO()
++
++ GeneratePlatformListText(out_txt_str, platforms)
++ out_txt.write(out_txt_str.getvalue().encode('utf-8'))
++ GeneratePlatformListHeader(out_h_str, platforms)
++ out_h.write(out_h_str.getvalue().encode('utf-8'))
++ GeneratePlatformListSource(out_cc_str, platforms)
++ out_cc.write(out_cc_str.getvalue().encode('utf-8'))
+
+ if options.output_cc:
+ out_cc.close()
diff --git a/repo/qt5-qtwebengine/qt-musl-crashpad.patch b/repo/qt5-qtwebengine/qt-musl-crashpad.patch
new file mode 100644
index 0000000..0dd9789
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-crashpad.patch
@@ -0,0 +1,13 @@
+diff --git a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h
+index 5b55c24..08cec52 100644
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/util/linux/thread_info.h
+@@ -273,7 +273,7 @@ union FloatContext {
+ "Size mismatch");
+ #elif defined(ARCH_CPU_ARMEL)
+ static_assert(sizeof(f32_t::fpregs) == sizeof(user_fpregs), "Size mismatch");
+-#if !defined(__GLIBC__)
++#if defined(OS_ANDROID)
+ static_assert(sizeof(f32_t::vfp) == sizeof(user_vfp), "Size mismatch");
+ #endif
+ #elif defined(ARCH_CPU_ARM64)
diff --git a/repo/qt5-qtwebengine/qt-musl-dispatch_to_musl.patch b/repo/qt5-qtwebengine/qt-musl-dispatch_to_musl.patch
new file mode 100644
index 0000000..a58a688
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-dispatch_to_musl.patch
@@ -0,0 +1,103 @@
+--- a/src/3rdparty/chromium/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
++++ b/src/3rdparty/chromium/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
+@@ -8,6 +8,7 @@
+ #include <dlfcn.h>
+ #include <malloc.h>
+
++#if defined(__GLIBC__)
+ // This translation unit defines a default dispatch for the allocator shim which
+ // routes allocations to libc functions.
+ // The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
+@@ -87,3 +88,92 @@ const AllocatorDispatch AllocatorDispatc
+ nullptr, /* aligned_free_function */
+ nullptr, /* next */
+ };
++
++#else // defined(__GLIBC__)
++
++#include <dlfcn.h>
++
++extern "C" {
++// Declare function pointers to the memory functions
++typedef void* (*t_libc_malloc)(size_t size);
++typedef void* (*t_libc_calloc)(size_t n, size_t size);
++typedef void* (*t_libc_realloc)(void* address, size_t size);
++typedef void* (*t_libc_memalign)(size_t alignment, size_t size);
++typedef void (*t_libc_free)(void* ptr);
++typedef size_t (*t_libc_malloc_usable_size)(void* ptr);
++
++// Static instances of pointers to libc.so dl symbols
++static t_libc_malloc libc_malloc = NULL;
++static t_libc_calloc libc_calloc = NULL;
++static t_libc_realloc libc_realloc = NULL;
++static t_libc_memalign libc_memalign = NULL;
++static t_libc_free libc_free = NULL;
++static t_libc_malloc_usable_size libc_malloc_usable_size = NULL;
++
++// resolve the symbols in libc.so
++void musl_libc_memory_init(void)
++{
++ libc_malloc = (t_libc_malloc) dlsym(RTLD_NEXT, "malloc");
++ libc_calloc = (t_libc_calloc) dlsym(RTLD_NEXT, "calloc");
++ libc_realloc = (t_libc_realloc) dlsym(RTLD_NEXT, "realloc");
++ libc_memalign = (t_libc_memalign) dlsym(RTLD_NEXT, "memalign");
++ libc_free = (t_libc_free) dlsym(RTLD_NEXT, "free");
++ libc_malloc_usable_size = (t_libc_malloc_usable_size) dlsym(RTLD_NEXT, "malloc_usable_size");
++}
++} // extern "C"
++
++namespace {
++
++using base::allocator::AllocatorDispatch;
++
++void* MuslMalloc(const AllocatorDispatch*, size_t size, void* context) {
++ if (!libc_malloc)
++ musl_libc_memory_init();
++ return (*libc_malloc)(size);
++}
++
++void* MuslCalloc(const AllocatorDispatch*, size_t n, size_t size, void* context) {
++ if (!libc_calloc)
++ musl_libc_memory_init();
++ return (*libc_calloc)(n, size);
++}
++
++void* MuslRealloc(const AllocatorDispatch*, void* address, size_t size, void* context) {
++ if (!libc_realloc)
++ musl_libc_memory_init();
++ return (*libc_realloc)(address, size);
++}
++
++void* MuslMemalign(const AllocatorDispatch*, size_t alignment, size_t size, void* context) {
++ if (!libc_memalign)
++ musl_libc_memory_init();
++ return (*libc_memalign)(alignment, size);
++}
++
++void MuslFree(const AllocatorDispatch*, void* address, void* context) {
++ if (!libc_free)
++ musl_libc_memory_init();
++ (*libc_free)(address);
++}
++
++size_t MuslGetSizeEstimate(const AllocatorDispatch*, void* address, void* context) {
++ // TODO(siggi, primiano): malloc_usable_size may need redirection in the
++ // presence of interposing shims that divert allocations.
++ if (!libc_malloc_usable_size)
++ musl_libc_memory_init();
++ return (*libc_malloc_usable_size)(address);
++}
++
++} // namespace
++
++const AllocatorDispatch AllocatorDispatch::default_dispatch = {
++ &MuslMalloc, /* alloc_function */
++ &MuslCalloc, /* alloc_zero_initialized_function */
++ &MuslMemalign, /* alloc_aligned_function */
++ &MuslRealloc, /* realloc_function */
++ &MuslFree, /* free_function */
++ &MuslGetSizeEstimate, /* get_size_estimate_function */
++ nullptr, /* next */
++};
++
++#endif
diff --git a/repo/qt5-qtwebengine/qt-musl-elf-arm.patch b/repo/qt5-qtwebengine/qt-musl-elf-arm.patch
new file mode 100644
index 0000000..9253c41
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-elf-arm.patch
@@ -0,0 +1,13 @@
+diff --git a/src/3rdparty/chromium/v8/src/base/cpu.cc b/src/3rdparty/chromium/v8/src/base/cpu.cc
+index f1c48fa13..ba8389c8c 100644
+--- a/src/3rdparty/chromium/v8/src/base/cpu.cc
++++ b/src/3rdparty/chromium/v8/src/base/cpu.cc
+@@ -20,7 +20,7 @@
+ #if V8_OS_QNX
+ #include <sys/syspage.h> // cpuinfo
+ #endif
+-#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)
++#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 || V8_HOST_ARCH_ARM)
+ #include <elf.h>
+ #endif
+ #if V8_OS_AIX
diff --git a/repo/qt5-qtwebengine/qt-musl-execinfo.patch b/repo/qt5-qtwebengine/qt-musl-execinfo.patch
new file mode 100644
index 0000000..01a5c94
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-execinfo.patch
@@ -0,0 +1,108 @@
+diff --git a/src/3rdparty/chromium/base/debug/stack_trace.cc b/src/3rdparty/chromium/base/debug/stack_trace.cc
+index d8ca822d9..f6f3d9c69 100644
+--- a/src/3rdparty/chromium/base/debug/stack_trace.cc
++++ b/src/3rdparty/chromium/base/debug/stack_trace.cc
+@@ -225,14 +225,14 @@ std::string StackTrace::ToString() const {
+ }
+ std::string StackTrace::ToStringWithPrefix(const char* prefix_string) const {
+ std::stringstream stream;
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ OutputToStreamWithPrefix(&stream, prefix_string);
+ #endif
+ return stream.str();
+ }
+
+ std::ostream& operator<<(std::ostream& os, const StackTrace& s) {
+-#if !defined(__UCLIBC__) & !defined(_AIX)
++#if defined(__GLIBC__) & !defined(_AIX)
+ s.OutputToStream(&os);
+ #else
+ os << "StackTrace::OutputToStream not implemented.";
+diff --git a/src/3rdparty/chromium/base/debug/stack_trace_posix.cc b/src/3rdparty/chromium/base/debug/stack_trace_posix.cc
+index f4ddf9c1e..aef993613 100644
+--- a/src/3rdparty/chromium/base/debug/stack_trace_posix.cc
++++ b/src/3rdparty/chromium/base/debug/stack_trace_posix.cc
+@@ -27,7 +27,7 @@
+ #if !defined(USE_SYMBOLIZE)
+ #include <cxxabi.h>
+ #endif
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ #include <execinfo.h>
+ #endif
+
+@@ -88,7 +88,7 @@ void DemangleSymbols(std::string* text) {
+ // Note: code in this function is NOT async-signal safe (std::string uses
+ // malloc internally).
+
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ std::string::size_type search_from = 0;
+ while (search_from < text->size()) {
+ // Look for the start of a mangled symbol, from search_from.
+@@ -123,7 +123,7 @@ void DemangleSymbols(std::string* text) {
+ search_from = mangled_start + 2;
+ }
+ }
+-#endif // !defined(__UCLIBC__) && !defined(_AIX)
++#endif // defined(__GLIBC__) && !defined(_AIX)
+ }
+ #endif // !defined(USE_SYMBOLIZE)
+
+@@ -135,7 +135,7 @@ class BacktraceOutputHandler {
+ virtual ~BacktraceOutputHandler() = default;
+ };
+
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
+ // This should be more than enough to store a 64-bit number in hex:
+ // 16 hex digits + 1 for null-terminator.
+@@ -218,7 +218,7 @@ void ProcessBacktrace(void* const* trace,
+ }
+ #endif // defined(USE_SYMBOLIZE)
+ }
+-#endif // !defined(__UCLIBC__) && !defined(_AIX)
++#endif // defined(__GLIBC__) && !defined(_AIX)
+
+ void PrintToStderr(const char* output) {
+ // NOTE: This code MUST be async-signal safe (it's used by in-process
+@@ -834,7 +834,7 @@ size_t CollectStackTrace(void** trace, size_t count) {
+ // NOTE: This code MUST be async-signal safe (it's used by in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ // Though the backtrace API man page does not list any possible negative
+ // return values, we take no chance.
+ return base::saturated_cast<size_t>(backtrace(trace, count));
+@@ -847,13 +847,13 @@ void StackTrace::PrintWithPrefix(const char* prefix_string) const {
+ // NOTE: This code MUST be async-signal safe (it's used by in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ PrintBacktraceOutputHandler handler;
+ ProcessBacktrace(trace_, count_, prefix_string, &handler);
+ #endif
+ }
+
+-#if !defined(__UCLIBC__) && !defined(_AIX)
++#if defined(__GLIBC__) && !defined(_AIX)
+ void StackTrace::OutputToStreamWithPrefix(std::ostream* os,
+ const char* prefix_string) const {
+ StreamBacktraceOutputHandler handler(os);
+diff --git a/src/3rdparty/chromium/base/logging.cc b/src/3rdparty/chromium/base/logging.cc
+index 4c4bfa6af..0ca5c2159 100644
+--- a/src/3rdparty/chromium/base/logging.cc
++++ b/src/3rdparty/chromium/base/logging.cc
+@@ -548,7 +548,7 @@ LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+
+ LogMessage::~LogMessage() {
+ size_t stack_start = stream_.tellp();
+-#if !defined(OFFICIAL_BUILD) && !defined(OS_NACL) && !defined(__UCLIBC__) && \
++#if !defined(OFFICIAL_BUILD) && !defined(OS_NACL) && defined(__GLIBC__) && \
+ !defined(OS_AIX)
+ if (severity_ == LOG_FATAL && !base::debug::BeingDebugged()) {
+ // Include a stack trace on a fatal, unless a debugger is attached.
diff --git a/repo/qt5-qtwebengine/qt-musl-mallinfo.patch b/repo/qt5-qtwebengine/qt-musl-mallinfo.patch
new file mode 100644
index 0000000..c7b7f27
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-mallinfo.patch
@@ -0,0 +1,43 @@
+diff --git a/src/3rdparty/chromium/base/process/process_metrics_posix.cc b/src/3rdparty/chromium/base/process/process_metrics_posix.cc
+index 9d12c427b..9030de9f6 100644
+--- a/src/3rdparty/chromium/base/process/process_metrics_posix.cc
++++ b/src/3rdparty/chromium/base/process/process_metrics_posix.cc
+@@ -119,14 +119,14 @@ size_t ProcessMetrics::GetMallocUsage() {
+ malloc_statistics_t stats = {0};
+ malloc_zone_statistics(nullptr, &stats);
+ return stats.size_in_use;
+-#elif defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
++#elif (defined(OS_LINUX) && defined(__GLIBC__)) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
+ struct mallinfo minfo = mallinfo();
+ #if BUILDFLAG(USE_TCMALLOC)
+ return minfo.uordblks;
+ #else
+ return minfo.hblkhd + minfo.arena;
+ #endif
+-#elif defined(OS_FUCHSIA)
++#else // was #elif defined(OS_FUCHSIA); also covers musl, which lacks mallinfo()
+ // TODO(fuchsia): Not currently exposed. https://crbug.com/735087.
+ return 0;
+ #endif
+diff --git a/src/3rdparty/chromium/base/trace_event/malloc_dump_provider.cc b/src/3rdparty/chromium/base/trace_event/malloc_dump_provider.cc
+index c327f4865..2717eca5a 100644
+--- a/src/3rdparty/chromium/base/trace_event/malloc_dump_provider.cc
++++ b/src/3rdparty/chromium/base/trace_event/malloc_dump_provider.cc
+@@ -132,7 +132,7 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ }
+ #elif defined(OS_FUCHSIA)
+ // TODO(fuchsia): Port, see https://crbug.com/706592.
+-#else
++#elif defined(__GLIBC__)
+ struct mallinfo info = mallinfo();
+ // In case of Android's jemalloc |arena| is 0 and the outer pages size is
+ // reported by |hblkhd|. In case of dlmalloc the total is given by
+@@ -142,6 +142,8 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+
+ // Total allocated space is given by |uordblks|.
+ allocated_objects_size = info.uordblks;
++#else
++ // musl libc does not support mallinfo()
+ #endif
+
+ MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
diff --git a/repo/qt5-qtwebengine/qt-musl-off_t.patch b/repo/qt5-qtwebengine/qt-musl-off_t.patch
new file mode 100644
index 0000000..6b44789
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-off_t.patch
@@ -0,0 +1,10 @@
+--- a/src/3rdparty/chromium/third_party/ots/include/opentype-sanitiser.h
++++ b/src/3rdparty/chromium/third_party/ots/include/opentype-sanitiser.h
+@@ -21,6 +21,7 @@ typedef unsigned __int64 uint64_t;
+ #define ots_htons(x) _byteswap_ushort (x)
+ #else
+ #include <arpa/inet.h>
++#include <sys/types.h>
+ #include <stdint.h>
+ #define ots_ntohl(x) ntohl (x)
+ #define ots_ntohs(x) ntohs (x)
diff --git a/repo/qt5-qtwebengine/qt-musl-pread-pwrite.patch b/repo/qt5-qtwebengine/qt-musl-pread-pwrite.patch
new file mode 100644
index 0000000..623f609
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-pread-pwrite.patch
@@ -0,0 +1,20 @@
+diff --git a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h
+index 5d9c2e8..e81e7b4 100644
+--- a/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h
++++ b/src/3rdparty/chromium/third_party/lss/linux_syscall_support.h
+@@ -1835,6 +1835,15 @@ struct kernel_statfs {
+ /* End of s390/s390x definitions */
+ #endif
+
++#ifndef __GLIBC__
++ /* For Musl libc pread/pwrite is the same as pread64/pwrite64 */
++#ifndef __NR_pread
++#define __NR_pread __NR_pread64
++#endif
++#ifndef __NR_pwrite
++#define __NR_pwrite __NR_pwrite64
++#endif
++#endif /* ifndef __GLIBC__ */
+
+ /* After forking, we must make sure to only call system calls. */
+ #if defined(__BOUNDED_POINTERS__)
diff --git a/repo/qt5-qtwebengine/qt-musl-pvalloc.patch b/repo/qt5-qtwebengine/qt-musl-pvalloc.patch
new file mode 100644
index 0000000..d5caf38
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-pvalloc.patch
@@ -0,0 +1,14 @@
+--- qtwebengine/src/core/api/qtbug-61521.cpp 2017-11-29 09:42:29.000000000 +0100
++++ qtwebengine/src/core/api/qtbug-61521.cpp 2018-01-28 06:49:29.454175725 +0100
+@@ -111,7 +111,11 @@
+ }
+
+ SHIM_HIDDEN void* ShimPvalloc(size_t size) {
++#if defined(__GLIBC__)
+ return pvalloc(size);
++#else
++ return valloc((size+4095)&~4095);
++#endif
+ }
+
+ SHIM_HIDDEN int ShimPosixMemalign(void** r, size_t a, size_t s) {
diff --git a/repo/qt5-qtwebengine/qt-musl-resolve.patch b/repo/qt5-qtwebengine/qt-musl-resolve.patch
new file mode 100644
index 0000000..386062e
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-resolve.patch
@@ -0,0 +1,61 @@
+--- a/src/3rdparty/chromium/net/dns/dns_reloader.cc
++++ b/src/3rdparty/chromium/net/dns/dns_reloader.cc
+@@ -9,6 +9,10 @@
+
+ #include <resolv.h>
+
++#if !defined(__GLIBC__)
++#include "resolv_compat.h"
++#endif
++
+ #include "base/lazy_instance.h"
+ #include "base/macros.h"
+ #include "base/notreached.h"
+--- a/src/3rdparty/chromium/net/dns/dns_config_service_posix.cc
++++ b/src/3rdparty/chromium/net/dns/dns_config_service_posix.cc
+@@ -8,6 +8,10 @@
+ #include <string>
+ #include <type_traits>
+
++#if !defined(__GLIBC__)
++#include "resolv_compat.h"
++#endif
++
+ #include "base/bind.h"
+ #include "base/files/file.h"
+ #include "base/files/file_path.h"
+diff --git a/src/3rdparty/chromium/net/dns/resolv_compat.h b/src/3rdparty/chromium/net/dns/resolv_compat.h
+new file mode 100644
+index 0000000..4f0e852
+--- /dev/null
++++ b/src/3rdparty/chromium/net/dns/resolv_compat.h
+@@ -0,0 +1,29 @@
++#if !defined(__GLIBC__)
++/***************************************************************************
++ * resolv_compat.h
++ *
++ * Mimic GLIBC's res_ninit() and res_nclose() for musl libc
++ * Note: res_init() is actually deprecated according to
++ * http://docs.oracle.com/cd/E36784_01/html/E36875/res-nclose-3resolv.html
++ **************************************************************************/
++#include <string.h>
++
++static inline int res_ninit(res_state statp)
++{
++ int rc = res_init();
++ if (statp != &_res) {
++ memcpy(statp, &_res, sizeof(*statp));
++ }
++ return rc;
++}
++
++static inline int res_nclose(res_state statp)
++{
++ if (!statp)
++ return -1;
++ if (statp != &_res) {
++ memset(statp, 0, sizeof(*statp));
++ }
++ return 0;
++}
++#endif
diff --git a/repo/qt5-qtwebengine/qt-musl-siginfo_t.patch b/repo/qt5-qtwebengine/qt-musl-siginfo_t.patch
new file mode 100644
index 0000000..b071563
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-siginfo_t.patch
@@ -0,0 +1,18 @@
+There's a subtle difference in the internal name of siginfo_t fields
+between glibc and musl. The structure itself is equivalent, so it
+should suffice to add a macro to rename the field.
+
+--- a/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/trap.cc
++++ b/src/3rdparty/chromium/sandbox/linux/seccomp-bpf/trap.cc
+@@ -25,6 +25,11 @@
+ #include "sandbox/linux/system_headers/linux_seccomp.h"
+ #include "sandbox/linux/system_headers/linux_signal.h"
+
++// musl libc defines siginfo_t __si_fields instead of _sifields
++#if !defined(__GLIBC__)
++#define _sifields __si_fields
++#endif
++
+ namespace {
+
+ struct arch_sigsys {
diff --git a/repo/qt5-qtwebengine/qt-musl-stackstart.patch b/repo/qt5-qtwebengine/qt-musl-stackstart.patch
new file mode 100644
index 0000000..2214af4
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-stackstart.patch
@@ -0,0 +1,22 @@
+diff --git a/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/stack_util.cc b/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/stack_util.cc
+index 1aaaa1c60..f49152fa6 100644
+--- a/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/stack_util.cc
++++ b/src/3rdparty/chromium/third_party/blink/renderer/platform/wtf/stack_util.cc
+@@ -29,7 +29,7 @@ size_t GetUnderestimatedStackSize() {
+ // FIXME: On Mac OSX and Linux, this method cannot estimate stack size
+ // correctly for the main thread.
+
+-#elif defined(__GLIBC__) || defined(OS_ANDROID) || defined(OS_FREEBSD) || \
++#elif defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_FREEBSD) || \
+ defined(OS_FUCHSIA)
+ // pthread_getattr_np() can fail if the thread is not invoked by
+ // pthread_create() (e.g., the main thread of blink_unittests).
+@@ -97,7 +97,7 @@ return Threading::ThreadStackSize();
+ }
+
+ void* GetStackStart() {
+-#if defined(__GLIBC__) || defined(OS_ANDROID) || defined(OS_FREEBSD) || \
++#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_FREEBSD) || \
+ defined(OS_FUCHSIA)
+ pthread_attr_t attr;
+ int error;
diff --git a/repo/qt5-qtwebengine/qt-musl-sysreg-for__WORDSIZE.patch b/repo/qt5-qtwebengine/qt-musl-sysreg-for__WORDSIZE.patch
new file mode 100644
index 0000000..de9377e
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-sysreg-for__WORDSIZE.patch
@@ -0,0 +1,14 @@
+diff --git a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/elf_core_dump.h b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/elf_core_dump.h
+index d03c7a8..d43fda0 100644
+--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/elf_core_dump.h
++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/common/linux/elf_core_dump.h
+@@ -37,6 +37,9 @@
+ #include <limits.h>
+ #include <link.h>
+ #include <stddef.h>
++#ifndef __GLIBC__
++#include <sys/reg.h>
++#endif
+
+ #include "common/memory_range.h"
+
diff --git a/repo/qt5-qtwebengine/qt-musl-thread-stacksize.patch b/repo/qt5-qtwebengine/qt-musl-thread-stacksize.patch
new file mode 100644
index 0000000..80c3d34
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-thread-stacksize.patch
@@ -0,0 +1,26 @@
+diff --git a/src/3rdparty/chromium/ppapi/utility/threading/simple_thread.cc b/src/3rdparty/chromium/ppapi/utility/threading/simple_thread.cc
+index 02bf49b..05ee182 100644
+--- a/src/3rdparty/chromium/ppapi/utility/threading/simple_thread.cc
++++ b/src/3rdparty/chromium/ppapi/utility/threading/simple_thread.cc
+@@ -13,7 +13,7 @@ namespace pp {
+ namespace {
+
+ // Use 2MB default stack size for Native Client, otherwise use system default.
+-#if defined(__native_client__)
++#if defined(__native_client__) || !defined(__GLIBC__)
+ const size_t kDefaultStackSize = 2 * 1024 * 1024;
+ #else
+ const size_t kDefaultStackSize = 0;
+diff --git a/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc b/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc
+index cf7f3ec..e06a5ce 100644
+--- a/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc
++++ b/src/3rdparty/chromium/v8/src/base/platform/platform-posix.cc
+@@ -854,7 +854,7 @@ void Thread::Start() {
+ #if V8_OS_MACOSX
+ // Default on Mac OS X is 512kB -- bump up to 1MB
+ stack_size = 1 * 1024 * 1024;
+-#elif V8_OS_AIX
++#elif V8_OS_AIX || !defined(__GLIBC__)
+ // Default on AIX is 96kB -- bump up to 2MB
+ stack_size = 2 * 1024 * 1024;
+ #endif
diff --git a/repo/qt5-qtwebengine/qt-musl-tid-caching.patch b/repo/qt5-qtwebengine/qt-musl-tid-caching.patch
new file mode 100644
index 0000000..acbee25
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt-musl-tid-caching.patch
@@ -0,0 +1,81 @@
+--- ./src/3rdparty/chromium/sandbox/linux/services/namespace_sandbox.cc.orig
++++ ./src/3rdparty/chromium/sandbox/linux/services/namespace_sandbox.cc
+@@ -209,6 +209,70 @@
+ return base::LaunchProcess(argv, launch_options_copy);
+ }
+
++#if defined(__aarch64__) || defined(__arm__)
++#define TLS_ABOVE_TP
++#endif
++
++struct musl_pthread
++{
++ /* Part 1 -- these fields may be external or
++ * internal (accessed via asm) ABI. Do not change. */
++ struct pthread *self;
++#ifndef TLS_ABOVE_TP
++ uintptr_t *dtv;
++#endif
++ struct pthread *prev, *next; /* non-ABI */
++ uintptr_t sysinfo;
++#ifndef TLS_ABOVE_TP
++#ifdef CANARY_PAD
++ uintptr_t canary_pad;
++#endif
++ uintptr_t canary;
++#endif
++
++/* Part 2 -- implementation details, non-ABI. */
++ int tid;
++ int errno_val;
++ volatile int detach_state;
++ volatile int cancel;
++ volatile unsigned char canceldisable, cancelasync;
++ unsigned char tsd_used:1;
++ unsigned char dlerror_flag:1;
++ unsigned char *map_base;
++ size_t map_size;
++ void *stack;
++ size_t stack_size;
++ size_t guard_size;
++ void *result;
++ struct __ptcb *cancelbuf;
++ void **tsd;
++ struct {
++ volatile void *volatile head;
++ long off;
++ volatile void *volatile pending;
++ } robust_list;
++ int h_errno_val;
++ volatile int timer_id;
++ locale_t locale;
++ volatile int killlock[1];
++ char *dlerror_buf;
++ void *stdio_locks;
++
++ /* Part 3 -- the positions of these fields relative to
++ * the end of the structure is external and internal ABI. */
++#ifdef TLS_ABOVE_TP
++ uintptr_t canary;
++ uintptr_t *dtv;
++#endif
++};
++
++void MaybeUpdateMuslTidCache()
++{
++ pid_t real_tid = sys_gettid();
++ pid_t* cached_tid_location = &reinterpret_cast<struct musl_pthread*>(pthread_self())->tid;
++ *cached_tid_location = real_tid;
++}
++
+ // static
+ pid_t NamespaceSandbox::ForkInNewPidNamespace(bool drop_capabilities_in_child) {
+ const pid_t pid =
+@@ -226,6 +290,7 @@
+ #if defined(LIBC_GLIBC)
+ MaybeUpdateGlibcTidCache();
+ #endif
++ MaybeUpdateMuslTidCache();
+ return 0;
+ }
+
diff --git a/repo/qt5-qtwebengine/qt5-qtwebengine.xibuild b/repo/qt5-qtwebengine/qt5-qtwebengine.xibuild
new file mode 100644
index 0000000..833a12f
--- /dev/null
+++ b/repo/qt5-qtwebengine/qt5-qtwebengine.xibuild
@@ -0,0 +1,75 @@
+#!/bin/sh
+
+NAME="qt5-qtwebengine"
+DESC="Qt5 - QtWebEngine components"
+
+MAKEDEPS="ffmpeg glib gst-plugins-base gstreamer icu libxcomposite libxext libxkbcommon libxkbfile libxrender libxslt mesa ninja alsa-lib bison musl-legacy-compat flex fontconfig gperf gzip harfbuzz jsoncpp libevent libjpeg-turbo libpng libsrtp libvpx libwebp libxcursor libxi libxml2 libxrandr libxslt libxtst linux-headers nodejs nss opus pcre protobuf pulseaudio sndio qt5-qtbase qt5-qtdeclarative qt5-qtwebchannel re2 ruby snappy sqlite3 yasm zlib python"
+
+PKG_VER=5.15.3_git20220407
+_commit="56260bb605a74fabdfc74cef3bf890394af88b3d"
+_chromium_commit="d13d0924c4e18ecc4b79adf0fec142ee9a9eaa14"
+# commit of catapult version with python3 support
+_catapult_commit="5eedfe23148a234211ba477f76fc2ea2e8529189"
+SOURCE="https://invent.kde.org/qt/qt/qtwebengine/-/archive/$_commit.tar.gz"
+
+ADDITIONAL="
+ https://invent.kde.org/qt/qt/qtwebengine-chromium/-/archive/$_chromium_commit.tar.gz
+ https://dev.alpinelinux.org/archive/qt5-qtwebengine/catapult-$_catapult_commit.tar.gz
+0001-pretend-to-stay-at-5.15.3.patch
+0010-chromium-musl-Match-syscalls-to-match-musl.patch
+default-pthread-stacksize.patch
+ffmpeg5.patch
+fix-chromium-build.patch
+musl-hacks.patch
+musl-sandbox.patch
+nasm.patch
+qt-chromium-python3.patch
+qt-musl-crashpad.patch
+qt-musl-dispatch_to_musl.patch
+qt-musl-elf-arm.patch
+qt-musl-execinfo.patch
+qt-musl-mallinfo.patch
+qt-musl-off_t.patch
+qt-musl-pread-pwrite.patch
+qt-musl-pvalloc.patch
+qt-musl-resolve.patch
+qt-musl-siginfo_t.patch
+qt-musl-stackstart.patch
+qt-musl-sysreg-for__WORDSIZE.patch
+qt-musl-thread-stacksize.patch
+qt-musl-tid-caching.patch
+remove-glibc-check.patch
+sndio.patch
+support-python3.patch
+"
+
+prepare() {
+ tar xf *.tar.gz
+ rmdir "$BUILD_ROOT/src/3rdparty"
+ mv "$BUILD_ROOT/qtwebengine-chromium-$_chromium_commit" "$BUILD_ROOT/src/3rdparty"
+ mkdir "$BUILD_ROOT"/.git "$BUILD_ROOT/src/3rdparty/chromium/.git"
+
+ # update the vendored catapult to a python3-compatible version.
+ rm -r "$BUILD_ROOT/src/3rdparty/chromium/third_party/catapult"
+ mv "$BUILD_ROOT"/catapult "$BUILD_ROOT/src/3rdparty/chromium/third_party/catapult"
+
+ apply_patches
+}
+
+build() {
+ qmake-qt5 QMAKE_EXTRA_ARGS+="-webengine-sndio -system-ffmpeg -system-opus -system-webp -proprietary-codecs" CONFIG+=force_debug_info
+ make
+}
+
+package() {
+ make install INSTALL_ROOT="$PKG_DEST"
+ sed -i -e 's:-L/home[^ ]\+::g' "$PKG_DEST"/usr/lib/pkgconfig/*.pc
+
+ # Drop QMAKE_PRL_BUILD_DIR because it references the build dir
+ find "$PKG_DEST/usr/lib" -type f -name '*.prl' \
+ -exec sed -i -e '/^QMAKE_PRL_BUILD_DIR/d' {} \;
+
+ install -d "$PKG_DEST"/usr/share/licenses
+ ln -s /usr/share/licenses/qt5-base "$PKG_DEST"/usr/share/licenses/qt5-qtwebengine
+}
+
diff --git a/repo/qt5-qtwebengine/remove-glibc-check.patch b/repo/qt5-qtwebengine/remove-glibc-check.patch
new file mode 100644
index 0000000..1d94b6b
--- /dev/null
+++ b/repo/qt5-qtwebengine/remove-glibc-check.patch
@@ -0,0 +1,78 @@
+Qt checks if glibc is available and, if not, disables a large part of Qt5WebEngine, crippling its functionality.
+However, these parts work fine with Musl, so there is no need to disable them.
+Just remove the check so it builds again. Since 5.15.1 is the last Qt version released with this build system, it will be obsolete with Qt6 and there is no real point in fixing this upstream.
+
+
+diff --git a/src/buildtools/config/support.pri b/src/buildtools/config/support.pri
+index e7f869a1..de18523d 100644
+--- a/src/buildtools/config/support.pri
++++ b/src/buildtools/config/support.pri
+@@ -189,15 +189,6 @@ defineTest(qtwebengine_checkForHostPkgCfg) {
+ return(true)
+ }
+
+-defineTest(qtwebengine_checkForGlibc) {
+- module = $$1
+- !qtConfig(webengine-system-glibc) {
+- qtwebengine_skipBuild("A suitable version >= 2.27 of libc required to build $${module} could not be found.")
+- return(false)
+- }
+- return(true)
+-}
+-
+ defineTest(qtwebengine_checkForKhronos) {
+ module = $$1
+ !qtConfig(webengine-system-khr) {
+diff --git a/src/buildtools/configure.json b/src/buildtools/configure.json
+index 88d1790c..8623f6d7 100644
+--- a/src/buildtools/configure.json
++++ b/src/buildtools/configure.json
+@@ -264,18 +264,6 @@
+ "label": "system gn",
+ "type": "detectGn"
+ },
+- "webengine-glibc": {
+- "label": "glibc > 2.16",
+- "type": "compile",
+- "test": {
+- "include": "features.h",
+- "tail": [
+- "#if __GLIBC__ < 2 || __GLIBC_MINOR__ < 17",
+- "#error glibc versions below 2.17 are not supported",
+- "#endif"
+- ]
+- }
+- },
+ "webengine-gperf": {
+ "label": "gperf",
+ "type": "detectGperf"
+@@ -379,7 +367,6 @@
+ && (!config.sanitizer || features.webengine-sanitizer)
+ && (!config.linux || features.pkg-config)
+ && (!config.linux || features.webengine-host-pkg-config)
+- && (!config.linux || features.webengine-system-glibc)
+ && (!config.linux || features.webengine-system-khr)
+ && (!config.linux || features.webengine-system-nss)
+ && (!config.linux || features.webengine-system-dbus)
+@@ -517,11 +504,6 @@
+ "condition": "config.unix && !config.darwin && libs.webengine-nss",
+ "output": [ "privateFeature" ]
+ },
+- "webengine-system-glibc": {
+- "label": "glibc",
+- "condition": "config.linux && tests.webengine-glibc",
+- "output": [ "privateFeature" ]
+- },
+ "webengine-system-x11" : {
+ "label": "x11",
+ "condition": "config.unix && libs.webengine-x11",
+@@ -782,8 +764,7 @@
+ "webengine-system-fontconfig",
+ "webengine-system-dbus",
+ "webengine-system-nss",
+- "webengine-system-khr",
+- "webengine-system-glibc"
++ "webengine-system-khr"
+ ]
+ },
+ {
diff --git a/repo/qt5-qtwebengine/sndio.patch b/repo/qt5-qtwebengine/sndio.patch
new file mode 100644
index 0000000..771e630
--- /dev/null
+++ b/repo/qt5-qtwebengine/sndio.patch
@@ -0,0 +1,142 @@
+from void-packages, 0101,0102-sndio.patch
+--- a/src/core/configure.json 2020-03-24 10:16:30.000000000 +0100
++++ - 2020-04-06 14:28:00.591236926 +0200
+@@ -21,6 +21,7 @@
+ "webengine-printing-and-pdf": "boolean",
+ "webengine-proprietary-codecs": "boolean",
+ "webengine-pulseaudio": "boolean",
++ "webengine-sndio": "boolean",
+ "webengine-spellchecker": "boolean",
+ "webengine-native-spellchecker": "boolean",
+ "webengine-extensions": "boolean",
+@@ -31,6 +32,7 @@
+ "webengine-kerberos": "boolean",
+ "alsa": { "type": "boolean", "name": "webengine-alsa" },
+ "pulseaudio": { "type": "boolean", "name": "webengine-pulseaudio" },
++ "sndio": { "type": "boolean", "name": "webengine-sndio" },
+ "ffmpeg": { "type": "enum", "name": "webengine-system-ffmpeg", "values": { "system": "yes", "qt": "no" } },
+ "opus": { "type": "enum", "name": "webengine-system-opus", "values": { "system": "yes", "qt": "no" } },
+ "webp": { "type": "enum", "name": "webengine-system-libwebp", "values": { "system": "yes", "qt": "no" } },
+@@ -68,7 +70,13 @@
+ "sources": [
+ { "type": "pkgConfig", "args": "libpulse >= 0.9.10 libpulse-mainloop-glib" }
+ ]
+- }
++ },
++ "sndio": {
++ "label": "sndio",
++ "sources": [
++ { "type": "pkgConfig", "args": "libsndio >= 1.5.0 libsndio" }
++ ]
++ }
+ },
+ "tests" : {
+ "webengine-host-compiler": {
+@@ -136,6 +144,10 @@
+ "condition": "libs.webengine-pulseaudio",
+ "output": [ "privateFeature" ]
+ },
++ "webengine-sndio": {
++ "label": "Use sndio",
++ "output": [ "privateFeature" ]
++ },
+ "webengine-pepper-plugins": {
+ "label": "Pepper Plugins",
+ "purpose": "Enables use of Pepper Flash plugins.",
+@@ -308,6 +320,11 @@
+ "condition": "config.unix"
+ },
+ {
++ "type": "feature",
++ "args": "webengine-sndio",
++ "condition": "config.unix"
++ },
++ {
+ "type": "feature",
+ "args": "webengine-sanitizer",
+ "condition": "config.sanitizer"
+--- a/src/3rdparty/chromium/media/audio/linux/audio_manager_linux.cc
++++ b/src/3rdparty/chromium/media/audio/linux/audio_manager_linux.cc
+@@ -20,6 +20,10 @@
+ #include "media/audio/pulse/audio_manager_pulse.h"
+ #include "media/audio/pulse/pulse_util.h"
+ #endif
++#if defined(USE_SNDIO)
++#include <sndio.h>
++#include "media/audio/openbsd/audio_manager_openbsd.h"
++#endif
+
+ namespace media {
+
+@@ -27,7 +31,8 @@ enum LinuxAudioIO {
+ kPulse,
+ kAlsa,
+ kCras,
+- kAudioIOMax = kCras // Must always be equal to largest logged entry.
++ kSndio,
++ kAudioIOMax = kSndio // Must always be equal to largest logged entry.
+ };
+
+ std::unique_ptr<media::AudioManager> CreateAudioManager(
+@@ -41,6 +46,17 @@ std::unique_ptr<media::AudioManager> CreateAudioManager(
+ }
+ #endif
+
++#if defined(USE_SNDIO)
++ struct sio_hdl * hdl = NULL;
++ if ((hdl=sio_open(SIO_DEVANY, SIO_PLAY, 1)) != NULL) {
++ sio_close(hdl);
++ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kSndio, kAudioIOMax +1);
++ return std::make_unique<AudioManagerOpenBSD>(std::move(audio_thread),
++ audio_log_factory);
++ }
++ DVLOG(1) << "Sndio is not available on the OS";
++#endif
++
+ #if defined(USE_PULSEAUDIO)
+ pa_threaded_mainloop* pa_mainloop = nullptr;
+ pa_context* pa_context = nullptr;
+--- a/src/3rdparty/chromium/media/BUILD.gn 2020-03-24 10:16:30.000000000 +0100
++++ - 2020-04-06 14:32:27.960817513 +0200
+@@ -65,6 +65,9 @@
+ if (use_cras) {
+ defines += [ "USE_CRAS" ]
+ }
++ if (use_sndio) {
++ defines += [ "USE_SNDIO" ]
++ }
+ }
+
+ # Internal grouping of the configs necessary to support sub-folders having their
+--- a/src/3rdparty/chromium/media/media_options.gni 2020-03-24 10:16:30.000000000 +0100
++++ - 2020-04-06 14:29:22.958630783 +0200
+@@ -114,6 +114,9 @@
+ # Enables runtime selection of ALSA library for audio.
+ use_alsa = false
+
++ # Enables runtime selection of sndio library for audio.
++ use_sndio = false
++
+ # Alsa should be used on non-Android, non-Mac POSIX systems.
+ # Alsa should be used on desktop Chromecast and audio-only Chromecast builds.
+ if (is_posix && !is_android && !is_mac &&
+--- a/src/3rdparty/chromium/media/audio/BUILD.gn 2021-02-23 16:36:59.000000000 +0100
++++ - 2021-03-07 22:00:34.889682069 +0100
+@@ -238,6 +238,17 @@
+ sources += [ "linux/audio_manager_linux.cc" ]
+ }
+
++ if (use_sndio) {
++ libs += [ "sndio" ]
++ sources += [
++ "openbsd/audio_manager_openbsd.cc",
++ "sndio/sndio_input.cc",
++ "sndio/sndio_input.h",
++ "sndio/sndio_output.cc",
++ "sndio/sndio_output.h"
++ ]
++ }
++
+ if (use_alsa) {
+ libs += [ "asound" ]
+ sources += [
diff --git a/repo/qt5-qtwebengine/support-python3.patch b/repo/qt5-qtwebengine/support-python3.patch
new file mode 100644
index 0000000..cc5e457
--- /dev/null
+++ b/repo/qt5-qtwebengine/support-python3.patch
@@ -0,0 +1,158 @@
+diff -upr a/configure.pri b/configure.pri
+--- a/configure.pri 2022-01-21 11:47:42.000000000 +0100
++++ b/configure.pri 2022-03-08 18:45:03.650823421 +0100
+@@ -7,20 +7,7 @@ QTWEBENGINE_SOURCE_TREE = $$PWD
+ equals(QMAKE_HOST.os, Windows): EXE_SUFFIX = .exe
+
+ defineTest(isPythonVersionSupported) {
+- python = $$system_quote($$system_path($$1))
+- python_version = $$system('$$python -c "import sys; print(sys.version_info[0:3])"')
+- python_version ~= s/[()]//g
+- python_version = $$split(python_version, ',')
+- python_major_version = $$first(python_version)
+- greaterThan(python_major_version, 2) {
+- qtLog("Python version 3 is not supported by Chromium.")
+- return(false)
+- }
+- python_minor_version = $$member(python_version, 1)
+- python_patch_version = $$member(python_version, 2)
+- greaterThan(python_major_version, 1): greaterThan(python_minor_version, 6): greaterThan(python_patch_version, 4): return(true)
+- qtLog("Unsupported python version: $${python_major_version}.$${python_minor_version}.$${python_patch_version}.")
+- return(false)
++ return(true)
+ }
+
+ defineTest(qtConfTest_detectJumboBuild) {
+@@ -52,22 +39,22 @@ defineTest(qtConfReport_jumboBuild) {
+ qtConfReportPadded($${1}, $$mergeLimit)
+ }
+
+-defineTest(qtConfTest_detectPython2) {
+- python = $$qtConfFindInPath("python2$$EXE_SUFFIX")
++defineTest(qtConfTest_detectPython) {
++ python = $$qtConfFindInPath("python3$$EXE_SUFFIX")
+ isEmpty(python) {
+- qtLog("'python2$$EXE_SUFFIX' not found in PATH. Checking for 'python$$EXE_SUFFIX'.")
++ qtLog("'python3$$EXE_SUFFIX' not found in PATH. Checking for 'python$$EXE_SUFFIX'.")
+ python = $$qtConfFindInPath("python$$EXE_SUFFIX")
+ }
+ isEmpty(python) {
+- qtLog("'python$$EXE_SUFFIX' not found in PATH. Giving up.")
++ qtLog("'python3$$EXE_SUFFIX' and 'python$$EXE_SUFFIX' not found in PATH. Giving up.")
+ return(false)
+ }
+ !isPythonVersionSupported($$python) {
+- qtLog("A suitable Python 2 executable could not be located.")
++ qtLog("A suitable Python executable could not be located.")
+ return(false)
+ }
+
+- # Make tests.python2.location available in configure.json.
++ # Make tests.python.location available in configure.json.
+ $${1}.location = $$clean_path($$python)
+ export($${1}.location)
+ $${1}.cache += location
+diff -upr a/mkspecs/features/functions.prf b/mkspecs/features/functions.prf
+--- a/mkspecs/features/functions.prf 2022-01-21 11:47:42.000000000 +0100
++++ b/mkspecs/features/functions.prf 2022-03-08 18:45:34.740851611 +0100
+@@ -39,11 +39,11 @@ defineReplace(which) {
+
+ # Returns the unquoted path to the python executable.
+ defineReplace(pythonPath) {
+- isEmpty(QMAKE_PYTHON2) {
++ isEmpty(QMAKE_PYTHON) {
+ # Fallback for building QtWebEngine with Qt < 5.8
+- QMAKE_PYTHON2 = python
++ QMAKE_PYTHON = python3
+ }
+- return($$QMAKE_PYTHON2)
++ return($$QMAKE_PYTHON)
+ }
+
+ # Returns the python executable for use with shell / make targets.
+diff -upr a/src/buildtools/config/support.pri b/src/buildtools/config/support.pri
+--- a/src/buildtools/config/support.pri 2022-01-21 11:47:42.000000000 +0100
++++ b/src/buildtools/config/support.pri 2022-03-08 18:44:25.677455634 +0100
+@@ -21,7 +21,7 @@ defineReplace(qtwebengine_checkWebEngine
+ !qtwebengine_checkForGperf(QtWebEngine):return(false)
+ !qtwebengine_checkForBison(QtWebEngine):return(false)
+ !qtwebengine_checkForFlex(QtWebEngine):return(false)
+- !qtwebengine_checkForPython2(QtWebEngine):return(false)
++ !qtwebengine_checkForPython(QtWebEngine):return(false)
+ !qtwebengine_checkForNodejs(QtWebEngine):return(false)
+ !qtwebengine_checkForSanitizer(QtWebEngine):return(false)
+ linux:!qtwebengine_checkForPkgCfg(QtWebEngine):return(false)
+@@ -51,7 +51,7 @@ defineReplace(qtwebengine_checkPdfError)
+ !qtwebengine_checkForGperf(QtPdf):return(false)
+ !qtwebengine_checkForBison(QtPdf):return(false)
+ !qtwebengine_checkForFlex(QtPdf):return(false)
+- !qtwebengine_checkForPython2(QtPdf):return(false)
++ !qtwebengine_checkForPython(QtPdf):return(false)
+ !qtwebengine_checkForSanitizer(QtPdf):return(false)
+ linux:!qtwebengine_checkForPkgCfg(QtPdf):return(false)
+ linux:!qtwebengine_checkForHostPkgCfg(QtPdf):return(false)
+@@ -147,10 +147,10 @@ defineTest(qtwebengine_checkForFlex) {
+ return(true)
+ }
+
+-defineTest(qtwebengine_checkForPython2) {
++defineTest(qtwebengine_checkForPython) {
+ module = $$1
+- !qtConfig(webengine-python2) {
+- qtwebengine_skipBuild("Python version 2 (2.7.5 or later) is required to build $${module}.")
++ !qtConfig(webengine-python) {
++ qtwebengine_skipBuild("Python is required to build $${module}.")
+ return(false)
+ }
+ return(true)
+diff -upr a/src/buildtools/configure.json b/src/buildtools/configure.json
+--- a/src/buildtools/configure.json 2022-01-21 11:47:42.000000000 +0100
++++ b/src/buildtools/configure.json 2022-03-08 18:44:25.677455634 +0100
+@@ -295,9 +295,9 @@
+ "label": "system ninja",
+ "type": "detectNinja"
+ },
+- "webengine-python2": {
+- "label": "python2",
+- "type": "detectPython2",
++ "webengine-python": {
++ "label": "python",
++ "type": "detectPython",
+ "log": "location"
+ },
+ "webengine-winversion": {
+@@ -374,7 +374,7 @@
+ && features.webengine-gperf
+ && features.webengine-bison
+ && features.webengine-flex
+- && features.webengine-python2
++ && features.webengine-python
+ && features.webengine-nodejs
+ && (!config.sanitizer || features.webengine-sanitizer)
+ && (!config.linux || features.pkg-config)
+@@ -400,7 +400,7 @@
+ && features.webengine-gperf
+ && features.webengine-bison
+ && features.webengine-flex
+- && features.webengine-python2
++ && features.webengine-python
+ && (!config.sanitizer || features.webengine-sanitizer)
+ && (!config.linux || features.pkg-config)
+ && (!config.linux || features.webengine-host-pkg-config)
+@@ -423,12 +423,12 @@
+ "autoDetect": "features.private_tests",
+ "output": [ "privateFeature" ]
+ },
+- "webengine-python2": {
+- "label": "python2",
+- "condition": "tests.webengine-python2",
++ "webengine-python": {
++ "label": "python",
++ "condition": "tests.webengine-python",
+ "output": [
+ "privateFeature",
+- { "type": "varAssign", "name": "QMAKE_PYTHON2", "value": "tests.webengine-python2.location" }
++ { "type": "varAssign", "name": "QMAKE_PYTHON", "value": "tests.webengine-python.location" }
+ ]
+ },
+ "webengine-gperf": {
diff --git a/repo/qt5-qtwebsockets/qt5-qtwebsockets.xibuild b/repo/qt5-qtwebsockets/qt5-qtwebsockets.xibuild
new file mode 100644
index 0000000..ce49be0
--- /dev/null
+++ b/repo/qt5-qtwebsockets/qt5-qtwebsockets.xibuild
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+NAME="qt5-qtwebsockets"
+DESC="Provides WebSocket communication compliant with RFC 6455"
+
+MAKEDEPS=" qt5-qtbase qt5-qtdeclarative"
+
+PKG_VER=5.15.3_git20201221
+_commit="e7883bc64440b1ff4666272ac6eb710ee4bc221b"
+SOURCE="https://invent.kde.org/qt/qt/qtwebsockets/-/archive/$_commit/qtwebsockets-$_commit.tar.gz"
+
+prepare() {
+ mkdir .git
+}
+
+build() {
+ qmake-qt5
+ make
+}
+
+package() {
+ make INSTALL_ROOT="$PKG_DEST" install
+
+ # Drop QMAKE_PRL_BUILD_DIR because it references the build dir
+ find "$PKG_DEST/usr/lib" -type f -name '*.prl' \
+ -exec sed -i -e '/^QMAKE_PRL_BUILD_DIR/d' {} \;
+
+ install -d "$PKG_DEST"/usr/share/licenses
+ ln -s /usr/share/licenses/qt5-base "$PKG_DEST"/usr/share/licenses/qt5-qtwebsockets
+}
+
diff --git a/repo/raylib/raylib.xibuild b/repo/raylib/raylib.xibuild
new file mode 100644
index 0000000..da1feac
--- /dev/null
+++ b/repo/raylib/raylib.xibuild
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+NAME="raylib"
+DESC="a simple and easy to use game development library"
+
+MAKEDEPS="cmake glfw"
+
+PKG_VER=4.0.0
+SOURCE="https://github.com/raysan5/raylib/archive/refs/tags/$PKG_VER.tar.gz"
+
+build() {
+ cmake -B build \
+ -DBUILD_EXAMPLES=Off \
+ -DBUILD_SHARED_LIBS=True \
+ -DCMAKE_BUILD_TYPE=None \
+ -DCMAKE_INSTALL_LIBDIR=lib \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DUSE_EXTERNAL_GLFW=ON \
+ $CMAKE_CROSSOPTS .
+ cmake --build build
+}
+
+package() {
+ DESTDIR="$PKG_DEST" cmake --install build
+}
+
diff --git a/repo/rpcsvc-proto/rpcsvc-proto.xibuild b/repo/rpcsvc-proto/rpcsvc-proto.xibuild
new file mode 100644
index 0000000..e6b307b
--- /dev/null
+++ b/repo/rpcsvc-proto/rpcsvc-proto.xibuild
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+NAME="rpcsvc-proto"
+DESC="rpcsvc protocol definitions from glibc"
+
+MAKEDEPS="intltool"
+
+PKG_VER=1.4.3
+SOURCE="https://github.com/thkukuk/rpcsvc-proto/releases/download/v$PKG_VER/rpcsvc-proto-$PKG_VER.tar.xz"
+
+build() {
+ ./configure --prefix=/usr
+ make
+}
+
+check() {
+ make check
+}
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+}
+
diff --git a/repo/ruby-rake/ruby-rake.xibuild b/repo/ruby-rake/ruby-rake.xibuild
new file mode 100644
index 0000000..63352fd
--- /dev/null
+++ b/repo/ruby-rake/ruby-rake.xibuild
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+NAME="ruby-rake"
+DESC="A Ruby task runner, inspired by make"
+
+MAKEDEPS=""
+
+PKG_VER=13.0.6
+SOURCE="https://github.com/ruby/rake/archive/v$PKG_VER/rake-$PKG_VER.tar.gz"
+
+build() {
+ gem build rake.gemspec
+}
+
+package() {
+ local gemdir="$PKG_DEST/$(ruby -e 'puts Gem.default_dir')"
+
+ gem install \
+ --local \
+ --install-dir "$gemdir" \
+ --bindir "$PKG_DEST/usr/bin" \
+ --ignore-dependencies \
+ --document ri \
+ --verbose \
+ rake
+
+ # Remove unnecessary files
+ cd "$gemdir"
+ rm -rf build_info cache extensions plugins
+
+ cd gems/rake-*
+ rm -rf doc *.rdoc MIT-LICENSE
+}
+
diff --git a/repo/tcsh/001-sysmalloc.patch b/repo/tcsh/001-sysmalloc.patch
new file mode 100644
index 0000000..b22c018
--- /dev/null
+++ b/repo/tcsh/001-sysmalloc.patch
@@ -0,0 +1,15 @@
+--- a/config_f.h
++++ b/config_f.h
+@@ -139,11 +139,8 @@
+ * This can be much slower and no memory statistics will be
+ * provided.
+ */
+-#if defined(__MACHTEN__) || defined(PURIFY) || defined(MALLOC_TRACE) || defined(_OSD_POSIX) || defined(__MVS__) || defined (__CYGWIN__) || defined(__GLIBC__) || defined(__OpenBSD__) || defined(__APPLE__) || defined (__ANDROID__)
++
+ # define SYSMALLOC
+-#else
+-# undef SYSMALLOC
+-#endif
+
+ /*
+ * USE_ACCESS Use access(2) rather than stat(2) when POSIX is defined.
diff --git a/repo/tcsh/6974bc35a5cda6eab748e364bd76a860ca66968b.patch b/repo/tcsh/6974bc35a5cda6eab748e364bd76a860ca66968b.patch
new file mode 100644
index 0000000..b5a0cf0
--- /dev/null
+++ b/repo/tcsh/6974bc35a5cda6eab748e364bd76a860ca66968b.patch
@@ -0,0 +1,22 @@
+From 6974bc35a5cda6eab748e364bd76a860ca66968b Mon Sep 17 00:00:00 2001
+From: zoulasc <christos@zoulas.com>
+Date: Sat, 11 Jan 2020 11:16:51 -0500
+Subject: [PATCH] Remove extra variable definition that cause -fno-common build
+ to fail (Werner Fink)
+
+---
+ tc.sig.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/tc.sig.c b/tc.sig.c
+index 77659ca..576605a 100644
+--- a/tc.sig.c
++++ b/tc.sig.c
+@@ -56,7 +56,6 @@ int alrmcatch_disabled; /* = 0; */
+ int phup_disabled; /* = 0; */
+ int pchild_disabled; /* = 0; */
+ int pintr_disabled; /* = 0; */
+-int handle_interrupt; /* = 0; */
+
+ int
+ handle_pending_signals(void)
diff --git a/repo/tcsh/csh.cshrc b/repo/tcsh/csh.cshrc
new file mode 100644
index 0000000..5f53824
--- /dev/null
+++ b/repo/tcsh/csh.cshrc
@@ -0,0 +1,96 @@
+#############################################################################
+##
+## Gentoo's csh.cshrc
+##
+## Based on the TCSH package (http://tcshrc.sourceforge.net)
+##
+## .tcshrc 2Sep2001, Simos Xenitellis (simos@hellug.gr)
+##
+## 2003-01-13 -- Alain Penders (alain@gentoo.org)
+## Renamed to /etc/csh.cshrc, basic cleanup work.
+##
+## 2003-01-24 -- Alain Penders (alain@gentoo.org)
+## Improved config file handling.
+##
+onintr -
+##
+
+##
+## Load the environment defaults.
+##
+if ( -r /etc/csh.env ) then
+ source /etc/csh.env
+endif
+
+
+##
+## Make sure our path includes the basic stuff for root and normal users.
+##
+if ($LOGNAME == "root") then
+ set -f path = ( $path /sbin )
+ set -f path = ( $path /usr/sbin )
+ set -f path = ( $path /usr/local/sbin )
+endif
+set -f path = ( $path /bin )
+set -f path = ( $path /usr/bin )
+set -f path = ( $path /usr/local/bin )
+set -f path = ( $path /opt/bin )
+
+
+##
+## Load our settings -- most are for interactive shells only, but not all.
+##
+if ( -e /etc/profile.d/tcsh-settings ) then
+ source /etc/profile.d/tcsh-settings
+endif
+
+
+##
+## Source extensions installed by ebuilds
+##
+if ( -d /etc/profile.d ) then
+ set _tmp=${?nonomatch}
+ set nonomatch
+ foreach _s ( /etc/profile.d/*.csh )
+ if ( -r $_s ) then
+ source $_s
+ endif
+ end
+ if ( ! ${_tmp} ) unset nonomatch
+ unset _tmp _s
+endif
+
+
+# Everything after this point is interactive shells only.
+if ( $?prompt == 0 ) goto end
+
+
+##
+## Load our aliases -- for interactive shells only
+##
+if ( -e /etc/profile.d/tcsh-aliases ) then
+ source /etc/profile.d/tcsh-aliases
+endif
+
+
+##
+## Load our key bindings -- for interactive shells only
+##
+if ( -e /etc/profile.d/tcsh-bindkey ) then
+ source /etc/profile.d/tcsh-bindkey
+endif
+
+
+##
+## Load our command completions -- for interactive shells only
+##
+if ( -e /etc/profile.d/tcsh-complete ) then
+ source /etc/profile.d/tcsh-complete
+endif
+
+
+end:
+##
+onintr
+##
+
diff --git a/repo/tcsh/csh.login b/repo/tcsh/csh.login
new file mode 100644
index 0000000..6868228
--- /dev/null
+++ b/repo/tcsh/csh.login
@@ -0,0 +1,71 @@
+#############################################################################
+##
+## Gentoo's csh.login
+##
+## 2003-01-13 -- Alain Penders (alain@gentoo.org)
+##
+## Initial version. Inspired by the Suse version.
+##
+
+
+##
+## Default terminal initialization
+##
+if ( -o /dev/$tty && ${?prompt} ) then
+ # Console
+ if ( ! ${?TERM} ) setenv TERM linux
+ if ( "$TERM" == "unknown" ) setenv TERM linux
+ # No tset available on SlackWare
+ if ( -x "`which stty`" ) stty sane cr0 pass8 dec
+ if ( -x "`which tset`" ) tset -I -Q
+ unsetenv TERMCAP
+ settc km yes
+endif
+
+##
+## Default UMASK
+##
+umask 022
+
+##
+## Set our SHELL variable.
+##
+setenv SHELL /bin/tcsh
+
+##
+## Setup a default MAIL variable
+##
+if ( -f /var/mail/$USER ) then
+ setenv MAIL /var/mail/$USER
+ set mail=$MAIL
+endif
+
+##
+## If we're root, report who's logging in and out.
+## disabled because musl libc doesn't support utmp
+##if ( "$uid" == "0" ) then
+## set who=( "%n has %a %l from %M." )
+## set watch=( any any ) #
+##endif
+
+##
+## Show the MOTD once the first time, and once after it has been changed.
+##
+## Note: if this is a SSH login, SSH will always show the MOTD, so we
+## skip it. Create ~/.hushlogin is you don't want SSH to show it.
+##
+if (-f /etc/motd ) then
+ if ( ! $?SSH_CLIENT ) then
+ cmp -s /etc/motd ~/.hushmotd
+ if ($status) then
+ tee ~/.hushmotd < /etc/motd
+ echo "((( MOTD shown only once, unless it is changed )))"
+ endif
+ endif
+endif
+
+##
+## Send us home.
+##
+cd
+
diff --git a/repo/tcsh/tcsh.post-install b/repo/tcsh/tcsh.post-install
new file mode 100644
index 0000000..33c7ebe
--- /dev/null
+++ b/repo/tcsh/tcsh.post-install
@@ -0,0 +1,4 @@
+#!/bin/sh
+add-shell '/bin/tcsh'
+add-shell '/bin/csh'
+exit 0
diff --git a/repo/tcsh/tcsh.post-upgrade b/repo/tcsh/tcsh.post-upgrade
new file mode 100644
index 0000000..33c7ebe
--- /dev/null
+++ b/repo/tcsh/tcsh.post-upgrade
@@ -0,0 +1,4 @@
+#!/bin/sh
+add-shell '/bin/tcsh'
+add-shell '/bin/csh'
+exit 0
diff --git a/repo/tcsh/tcsh.pre-deinstall b/repo/tcsh/tcsh.pre-deinstall
new file mode 100644
index 0000000..3c0b05c
--- /dev/null
+++ b/repo/tcsh/tcsh.pre-deinstall
@@ -0,0 +1,4 @@
+#!/bin/sh
+remove-shell '/bin/csh'
+remove-shell '/bin/tcsh'
+exit 0
diff --git a/repo/tcsh/tcsh.xibuild b/repo/tcsh/tcsh.xibuild
new file mode 100644
index 0000000..9bca519
--- /dev/null
+++ b/repo/tcsh/tcsh.xibuild
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+NAME="tcsh"
+DESC="extended C-shell"
+
+MAKEDEPS="ncurses"
+
+PKG_VER=6.24.00
+SOURCE="https://github.com/tcsh-org/tcsh/archive/TCSH6_21_00.tar.gz"
+
+ADDITIONAL="
+001-sysmalloc.patch
+6974bc35a5cda6eab748e364bd76a860ca66968b.patch
+csh.cshrc
+csh.login
+tcsh.post-install
+"
+
+prepare() {
+ apply_patches
+}
+
+build() {
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --bindir=/bin \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --disable-rpath
+ make
+}
+
+
+package() {
+ make DESTDIR="$PKG_DEST" install
+ install -Dm0644 "$BUILD_ROOT/csh.cshrc" "$PKG_DEST/etc/csh.cshrc"
+ install -Dm0644 "$BUILD_ROOT/csh.login" "$PKG_DEST/etc/csh.login"
+ ln -s tcsh "$PKG_DEST/bin/csh"
+ ln -s tcsh.1 "$PKG_DEST/usr/share/man/man1/csh.1"
+ rm -rf "$PKG_DEST"/usr/share/locale
+}
+
+postinstall () {
+ if [ ! -f /etc/shells ] || ! grep -q "^/bin/tcsh$" /etc/shells; then
+ echo "/bin/tcsh" >> /etc/shells
+ fi
+
+ if [ ! -f /etc/shells ] || ! grep -q "^/bin/csh$" /etc/shells; then
+ echo "/bin/csh" >> /etc/shells
+ fi
+}
diff --git a/repo/virt-manager/fix-latest-libvirt-xml-output.patch b/repo/virt-manager/fix-latest-libvirt-xml-output.patch
new file mode 100644
index 0000000..f81c44d
--- /dev/null
+++ b/repo/virt-manager/fix-latest-libvirt-xml-output.patch
@@ -0,0 +1,108 @@
+From 87ce425197c2d6bfaf675dc4c0f92a2f615a39a5 Mon Sep 17 00:00:00 2001
+From: Cole Robinson <crobinso@redhat.com>
+Date: Tue, 6 Apr 2021 15:21:00 -0400
+Subject: [PATCH] tests: storage: Fix with latest libvirt XML output
+Patch-Source: https://github.com/virt-manager/virt-manager/commit/87ce425197c2d6bfaf675dc4c0f92a2f615a39a5
+
+Signed-off-by: Cole Robinson <crobinso@redhat.com>
+---
+ tests/data/storage/pool-fs-volclone.xml | 2 --
+ tests/data/storage/pool-logical-volclone.xml | 2 --
+ tests/data/storage/pool-netfs-volclone.xml | 2 --
+ tests/test_storage.py | 11 ++++++-----
+ 6 files changed, 6 insertions(+), 15 deletions(-)
+
+diff --git a/tests/data/storage/pool-fs-volclone.xml b/tests/data/storage/pool-fs-volclone.xml
+index 34e8cd088..6c7d3b9dd 100644
+--- a/tests/data/storage/pool-fs-volclone.xml
++++ b/tests/data/storage/pool-fs-volclone.xml
+@@ -1,8 +1,6 @@
+ <volume type="file">
+ <name>pool-fs-volclone</name>
+ <key>/var/lib/libvirt/images/pool-fs/pool-fs-vol</key>
+- <source>
+- </source>
+ <capacity unit="bytes">10737418240</capacity>
+ <allocation unit="bytes">5368709120</allocation>
+ <target>
+diff --git a/tests/data/storage/pool-logical-volclone.xml b/tests/data/storage/pool-logical-volclone.xml
+index c012017ef..8abcfa64c 100644
+--- a/tests/data/storage/pool-logical-volclone.xml
++++ b/tests/data/storage/pool-logical-volclone.xml
+@@ -1,8 +1,6 @@
+ <volume type="file">
+ <name>pool-logical-volclone</name>
+ <key>/dev/pool-logical/pool-logical-vol</key>
+- <source>
+- </source>
+ <capacity unit="bytes">10737418240</capacity>
+ <allocation unit="bytes">10737418240</allocation>
+ <target>
+diff --git a/tests/data/storage/pool-netfs-volclone.xml b/tests/data/storage/pool-netfs-volclone.xml
+index b6b39f79d..b7fd26507 100644
+--- a/tests/data/storage/pool-netfs-volclone.xml
++++ b/tests/data/storage/pool-netfs-volclone.xml
+@@ -1,8 +1,6 @@
+ <volume type="file">
+ <name>pool-netfs-volclone</name>
+ <key>/var/lib/libvirt/images/pool-netfs/pool-netfs-vol</key>
+- <source>
+- </source>
+ <capacity unit="bytes">10737418240</capacity>
+ <allocation unit="bytes">5368709120</allocation>
+ <target>
+diff --git a/tests/test_storage.py b/tests/test_storage.py
+index ee33ab32e..ba0e4e711 100644
+--- a/tests/test_storage.py
++++ b/tests/test_storage.py
+@@ -65,11 +65,6 @@ def createVol(conn, poolobj, volname=None, input_vol=None, clone_vol=None):
+ if volname is None:
+ volname = poolobj.name() + "-vol"
+
+- # Format here depends on libvirt-1.2.0 and later
+- if clone_vol and conn.local_libvirt_version() < 1002000:
+- log.debug("skip clone compare")
+- return
+-
+ alloc = 5 * 1024 * 1024 * 1024
+ cap = 10 * 1024 * 1024 * 1024
+ vol_inst = StorageVolume(conn)
+@@ -91,6 +86,12 @@ def createVol(conn, poolobj, volname=None, input_vol=None, clone_vol=None):
+
+ vol_inst.validate()
+ filename = os.path.join(BASEPATH, vol_inst.name + ".xml")
++
++ # Format here depends on libvirt-7.2.0 and later
++ if clone_vol and conn.local_libvirt_version() < 7002000:
++ log.debug("skip clone compare")
++ return
++
+ utils.diff_compare(vol_inst.get_xml(), filename)
+ return vol_inst.install(meter=False)
+
+diff --git a/tests/data/storage/pool-disk-volclone.xml b/tests/data/storage/pool-disk-volclone.xml
+index a4e9c3d..24f71a1 100644
+--- a/tests/data/storage/pool-disk-volclone.xml
++++ b/tests/data/storage/pool-disk-volclone.xml
+@@ -1,8 +1,6 @@
+ <volume type="file">
+ <name>pool-disk-volclone</name>
+ <key>/dev/pool-disk-vol</key>
+- <source>
+- </source>
+ <capacity unit="bytes">10737418240</capacity>
+ <allocation unit="bytes">5368709120</allocation>
+ <target>
+diff --git a/tests/data/storage/pool-dir-volclone.xml b/tests/data/storage/pool-dir-volclone.xml
+index c8bde66..16f1e56 100644
+--- a/tests/data/storage/pool-dir-volclone.xml
++++ b/tests/data/storage/pool-dir-volclone.xml
+@@ -1,8 +1,6 @@
+ <volume type="file">
+ <name>pool-dir-volclone</name>
+ <key>/var/lib/libvirt/images/pool-dir/pool-dir-vol</key>
+- <source>
+- </source>
+ <capacity unit="bytes">10737418240</capacity>
+ <allocation unit="bytes">5368709120</allocation>
+ <target>
diff --git a/repo/virt-manager/tests-remove-sgio-unfiltered.patch b/repo/virt-manager/tests-remove-sgio-unfiltered.patch
new file mode 100644
index 0000000..905dc73
--- /dev/null
+++ b/repo/virt-manager/tests-remove-sgio-unfiltered.patch
@@ -0,0 +1,65 @@
+diff --git a/tests/data/cli/compare/virt-install-many-devices.xml b/tests/data/cli/compare/virt-install-many-devices.xml
+index 49e9dcc..cf48427 100644
+--- a/tests/data/cli/compare/virt-install-many-devices.xml
++++ b/tests/data/cli/compare/virt-install-many-devices.xml
+@@ -71,7 +71,7 @@
+ <source file="/var/lib/libvirt/images/disk.qcow2"/>
+ <target dev="vdc" bus="virtio"/>
+ </disk>
+- <disk type="block" device="lun" sgio="unfiltered" rawio="yes">
++ <disk type="block" device="lun" sgio="filtered" rawio="yes">
+ <driver name="qemu" type="raw"/>
+ <source dev="/iscsi-pool/diskvol1"/>
+ <target dev="sdab" bus="scsi"/>
+diff --git a/tests/data/testdriver/testdriver.xml b/tests/data/testdriver/testdriver.xml
+index ea90f0f..beed8f0 100644
+--- a/tests/data/testdriver/testdriver.xml
++++ b/tests/data/testdriver/testdriver.xml
+@@ -245,7 +245,7 @@ Foo bar baz &amp; yeah boii &lt; &gt; yeahfoo
+ <shareable/>
+ </disk>
+
+- <disk type='block' device='lun' rawio='no' sgio='unfiltered'>
++ <disk type='block' device='lun' rawio='no'>
+ <driver name='qemu' type='raw'/>
+ <source dev='/dev/szz'>
+ <reservations managed="yes"/>
+diff --git a/tests/data/xmlparse/change-disk-out.xml b/tests/data/xmlparse/change-disk-out.xml
+index f65a1bc..263c9f9 100644
+--- a/tests/data/xmlparse/change-disk-out.xml
++++ b/tests/data/xmlparse/change-disk-out.xml
+@@ -37,7 +37,7 @@
+ <disk type="file" device="floppy">
+ <target dev="fde" bus="fdc"/>
+ </disk>
+- <disk type="block" device="lun" sgio="unfiltered" rawio="yes">
++ <disk type="block" device="lun">
+ <driver name="qemu" type="raw"/>
+ <source dev="/dev/sda"/>
+ <target dev="hdd" bus="scsi"/>
+diff --git a/tests/test_cli.py b/tests/test_cli.py
+index 5e69a13..72ff4df 100644
+--- a/tests/test_cli.py
++++ b/tests/test_cli.py
+@@ -590,7 +590,7 @@ vcpus.vcpu1.id=2,vcpus.vcpu1.enabled=yes
+ --disk source.file=%(NEWIMG1)s,sparse=false,size=.001,perms=ro,error_policy=enospace,discard=unmap,detect_zeroes=unmap,address.type=drive,address.controller=0,address.target=2,address.unit=0
+ --disk device=cdrom,bus=sata,read_bytes_sec=1,read_iops_sec=2,write_bytes_sec=5,write_iops_sec=6,driver.copy_on_read=on,geometry.cyls=16383,geometry.heads=16,geometry.secs=63,geometry.trans=lba
+ --disk size=1
+---disk /iscsi-pool/diskvol1,total_bytes_sec=10,total_iops_sec=20,bus=scsi,device=lun,sgio=unfiltered,rawio=yes
++--disk /iscsi-pool/diskvol1,total_bytes_sec=10,total_iops_sec=20,bus=scsi,device=lun,sgio=filtered,rawio=yes
+ --disk /dev/default-pool/iso-vol,seclabel.model=dac,seclabel1.model=selinux,seclabel1.relabel=no,seclabel0.label=foo,bar,baz,iotune.read_bytes_sec=1,iotune.read_iops_sec=2,iotune.write_bytes_sec=5,iotune.write_iops_sec=6
+ --disk /dev/default-pool/iso-vol,format=qcow2,startup_policy=optional,iotune.total_bytes_sec=10,iotune.total_iops_sec=20,
+ --disk source_pool=rbd-ceph,source_volume=some-rbd-vol,size=.1,driver_type=raw
+diff --git a/tests/test_xmlparse.py b/tests/test_xmlparse.py
+index 6d1aadd..116fd35 100644
+--- a/tests/test_xmlparse.py
++++ b/tests/test_xmlparse.py
+@@ -311,8 +311,6 @@ def testAlterDisk():
+ check = _make_checker(disk)
+ check("type", "block")
+ check("device", "lun")
+- check("sgio", None, "unfiltered")
+- check("rawio", None, "yes")
+
+ disk = _get_disk("sda")
+ check = _make_checker(disk)
diff --git a/repo/virt-manager/virt-manager.xibuild b/repo/virt-manager/virt-manager.xibuild
new file mode 100644
index 0000000..8397bbd
--- /dev/null
+++ b/repo/virt-manager/virt-manager.xibuild
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+NAME="virt-manager"
+DESC="GUI for managing virtual machines"
+
+MAKEDEPS="intltool glib python-docutils"
+DEPS="libvirt"
+
+PKG_VER=3.2.0
+SOURCE="https://releases.pagure.org/virt-manager/virt-manager-$PKG_VER.tar.gz"
+
+ADDITIONAL="
+fix-latest-libvirt-xml-output.patch
+tests-remove-sgio-unfiltered.patch
+"
+
+prepare () {
+ apply_patches
+}
+
+build() {
+ python3 setup.py build
+}
+
+
+package() {
+ python3 setup.py --no-update-icon-cache --no-compile-schemas install --root "$PKG_DEST"
+ python3 -m compileall "$PKG_DEST/usr/share/virt-manager"
+ python3 -O -m compileall "$PKG_DEST/usr/share/virt-manager"
+}
diff --git a/repo/weechat-matrix/weechat-matrix.post-install b/repo/weechat-matrix/weechat-matrix.post-install
new file mode 100644
index 0000000..3aa4afe
--- /dev/null
+++ b/repo/weechat-matrix/weechat-matrix.post-install
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+cat <<EOF
+*
+* To load this script, execute the following command in WeeChat:
+* /script load weechat-matrix.py
+* To load it automatically during WeeChat startup, use:
+* $ mkdir -p ~/.local/share/weechat/python/autoload
+* $ ln -s /usr/share/weechat/python/weechat-matrix.py -t ~/.local/share/weechat/python/autoload
+*
+EOF
+
+exit 0
diff --git a/repo/weechat-matrix/weechat-matrix.xibuild b/repo/weechat-matrix/weechat-matrix.xibuild
new file mode 100644
index 0000000..fb9134e
--- /dev/null
+++ b/repo/weechat-matrix/weechat-matrix.xibuild
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+NAME="weechat-matrix"
+DESC="WeeChat Matrix protocol script"
+
+MAKEDEPS="poetry python-build python-importlib-metadata python-installer python-poetry-core python-wheel python-webcolors python-hypothesis python-atomicwrites python-nio python-exceptiongroup python-sortedcontainers"
+DEPS="python-build python-importlib-metadata python-poetry-core python-openssl python-logbook python-future python-nio python-webcolors python-matrix-nio python-atomicwrites"
+
+SOURCE="https://github.com/poljar/weechat-matrix.git"
+
+build() {
+ sed -i 's_#!/usr/bin/env -S python3 -u_#!/usr/bin/env python3 -u_g' contrib/matrix_sso_helper.py contrib/matrix_upload.py
+ GIT_DIR=. python3 -m build --skip-dependency-check --no-isolation --wheel .
+}
+
+package() {
+ python3 -m installer --destdir="$PKG_DEST" dist/*.whl
+ install -Dm755 main.py "$PKG_DEST/usr/share/weechat/python/weechat-matrix.py"
+
+ for _script in matrix_decrypt matrix_sso_helper matrix_upload
+ do
+ install -Dm755 "contrib/$_script.py" "$PKG_DEST/usr/bin/$_script"
+ done
+}
+
+postinstall () {
+cat <<EOF
+*
+* To load this script, execute the following command in WeeChat:
+* /script load weechat-matrix.py
+* To load it automatically during WeeChat startup, use:
+* $ mkdir -p ~/.local/share/weechat/python/autoload
+* $ ln -s /usr/share/weechat/python/weechat-matrix.py -t ~/.local/share/weechat/python/autoload
+*
+EOF
+
+exit 0
+}
diff --git a/skip/ceph/10-musl-fixes.patch b/skip/ceph/10-musl-fixes.patch
new file mode 100644
index 0000000..1b7c907
--- /dev/null
+++ b/skip/ceph/10-musl-fixes.patch
@@ -0,0 +1,15 @@
+fix for musl
+
+diff -Nurp a/src/tools/rbd_nbd/rbd-nbd.cc b/src/tools/rbd_nbd/rbd-nbd.cc
+--- a/src/tools/rbd_nbd/rbd-nbd.cc 2020-11-21 08:06:35.834423310 +0000
++++ b/src/tools/rbd_nbd/rbd-nbd.cc 2020-11-21 08:21:12.067978842 +0000
+@@ -576,7 +576,8 @@ private:
+ for (unsigned i = 0; i < cmdline.size(); i++) {
+ char *arg = &cmdline[i];
+ if (i == 0) {
+- if (strcmp(basename(arg) , "rbd-nbd") != 0) {
++ const char *fname = strrchr(arg, '/');
++ if (strcmp(fname ? fname+1 : arg, "rbd-nbd") != 0) {
+ return -EINVAL;
+ }
+ } else {
diff --git a/skip/ceph/11-dump_time_header_impl.patch b/skip/ceph/11-dump_time_header_impl.patch
new file mode 100644
index 0000000..d19e7ed
--- /dev/null
+++ b/skip/ceph/11-dump_time_header_impl.patch
@@ -0,0 +1,34 @@
+Patch by Robin Mueller
+
+musl's strftime writes 'UTC' instead of 'GMT' when the %Z conversion is
+used in the format pattern, and the S3 clients apparently do not accept
+'UTC' in the date strings.
+
+This patch replaces 'UTC' with 'GMT' at the relevant location.
+
+--- a/src/rgw/rgw_rest.cc 2021-07-08 16:03:56.000000000 +0200
++++ b/src/rgw/rgw_rest.cc 2021-08-19 09:48:30.339492024 +0200
+@@ -436,8 +436,21 @@
+ return 0;
+ }
+
+- return strftime(timestr, sizeof(timestr),
+- "%a, %d %b %Y %H:%M:%S %Z", tmp);
++ size_t len = strftime(timestr, sizeof(timestr),
++ "%a, %d %b %Y %H:%M:%S %Z", tmp);
++
++ int position = 0;
++ while (timestr[position] != 'U' && len - position > 3)
++ position++;
++
++ if (len - position == 3) {
++ char substr[4];
++ memcpy(substr, &timestr[position], 4);
++
++ if (strcmp(substr, "UTC") == 0)
++ memcpy(&timestr[position], "GMT", 3);
++ }
++ return len;
+ }
+
+ void dump_time_header(struct req_state *s, const char *name, real_time t)
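For reference, a minimal standalone sketch of the fixup the patch above applies: format a GMT timestamp with strftime and, if musl emitted a trailing "UTC", overwrite it in place with "GMT". The helper name fix_tz_name and the trailing-only check are illustrative simplifications, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <time.h>

/* Illustrative only: musl's %Z prints "UTC" for gmtime() results;
 * HTTP/S3-style date headers expect "GMT", so patch the buffer. */
static void fix_tz_name(char *buf, size_t len)
{
    if (len >= 3 && strcmp(buf + len - 3, "UTC") == 0)
        memcpy(buf + len - 3, "GMT", 3);
}

int main(void)
{
    char buf[64];
    time_t now = time(NULL);
    size_t len = strftime(buf, sizeof(buf),
                          "%a, %d %b %Y %H:%M:%S %Z", gmtime(&now));
    fix_tz_name(buf, len);
    puts(buf);  /* e.g. "Mon, 27 Jun 2022 22:09:07 GMT" */
    return 0;
}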
diff --git a/skip/ceph/11-parse_rfc1123_alt.patch b/skip/ceph/11-parse_rfc1123_alt.patch
new file mode 100644
index 0000000..5b54c4e
--- /dev/null
+++ b/skip/ceph/11-parse_rfc1123_alt.patch
@@ -0,0 +1,53 @@
+Patch by Robin Mueller
+
+musl's strptime doesn't support the %z conversion in the format pattern;
+it is a glibc-specific extension.
+
+The patch is a slightly adapted version of the glibc code:
+https://elixir.bootlin.com/glibc/latest/source/time/strptime_l.c#L776
+
+--- a/src/rgw/rgw_common.cc 2021-07-08 16:03:56.000000000 +0200
++++ b/src/rgw/rgw_common.cc 2021-08-18 13:08:22.938903459 +0200
+@@ -531,7 +531,41 @@
+ {
+ // FIPS zeroization audit 20191115: this memset is not security related.
+ memset(t, 0, sizeof(*t));
+- return check_str_end(strptime(s, "%a, %d %b %Y %H:%M:%S %z", t));
++ s = strptime(s, "%a, %d %b %Y %H:%M:%S", t);
++ if (s) {
++ s++;
++ int val;
++ val = 0;
++ while (isspace(*s))
++ ++s;
++ if (*s == 'Z') {
++ ++s;
++ t->tm_gmtoff = 0;
++ } else {
++ if (*s != '+' && *s != '-')
++ return 0;
++ bool neg = *s++ == '-';
++ int n = 0;
++ while (n < 4 && *s >= '0' && *s <= '9') {
++ val = val * 10 + *s++ - '0';
++ ++n;
++ if (*s == ':' && n == 2 && isdigit (*(s + 1)))
++ ++s;
++ }
++ if (n == 2)
++ val *= 100;
++ else if (n != 4)
++ /* Only two or four digits recognized. */
++ return 0;
++ else if (val % 100 >= 60)
++ /* Minutes valid range is 0 through 59. */
++ return 0;
++ t->tm_gmtoff = (val / 100) * 3600 + (val % 100) * 60;
++ if (neg)
++ t->tm_gmtoff = -t->tm_gmtoff;
++ }
++ }
++ return check_str_end(s);
+ }
+
+ bool parse_rfc2616(const char *s, struct tm *t)
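As a rough standalone illustration of the offset handling the patch above adds (a trailing "Z" or "+/-HHMM", optionally "+/-HH:MM", converted to seconds east of UTC), assuming nothing beyond plain C99; the name parse_gmtoff is hypothetical and not part of the patch:

#include <ctype.h>
#include <stdio.h>

/* Illustrative only: parse "Z", "+HHMM", "-HHMM" or "+HH:MM"
 * into seconds east of UTC; returns 1 on success, 0 on failure. */
static int parse_gmtoff(const char *s, long *gmtoff)
{
    while (isspace((unsigned char)*s))
        ++s;
    if (*s == 'Z') {
        *gmtoff = 0;
        return 1;
    }
    if (*s != '+' && *s != '-')
        return 0;
    int neg = (*s++ == '-');
    int n = 0, val = 0;
    while (n < 4 && *s >= '0' && *s <= '9') {
        val = val * 10 + *s++ - '0';
        ++n;
        if (*s == ':' && n == 2 && isdigit((unsigned char)*(s + 1)))
            ++s;                      /* allow "+HH:MM" */
    }
    if (n == 2)
        val *= 100;                   /* "+02" means "+0200" */
    else if (n != 4 || val % 100 >= 60)
        return 0;
    *gmtoff = (val / 100) * 3600L + (val % 100) * 60L;
    if (neg)
        *gmtoff = -*gmtoff;
    return 1;
}

int main(void)
{
    long off;
    if (parse_gmtoff("+0530", &off))
        printf("%ld\n", off);         /* prints 19800 */
    return 0;
}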
diff --git a/skip/ceph/11-s3_expiration_header.patch b/skip/ceph/11-s3_expiration_header.patch
new file mode 100644
index 0000000..ac12a83
--- /dev/null
+++ b/skip/ceph/11-s3_expiration_header.patch
@@ -0,0 +1,30 @@
+Patch by Robin Mueller
+
+Fix musl date handling
+
+--- a/src/rgw/rgw_lc.cc 2021-09-16 16:27:19.000000000 +0200
++++ b/src/rgw/rgw_lc.cc 2021-10-01 09:17:06.996639952 +0200
+@@ -2238,8 +2238,21 @@
+ // Fri, 23 Dec 2012 00:00:00 GMT
+ char exp_buf[100];
+ time_t exp = ceph::real_clock::to_time_t(*expiration_date);
+- if (std::strftime(exp_buf, sizeof(exp_buf),
+- "%a, %d %b %Y %T %Z", std::gmtime(&exp))) {
++ std::size_t len = std::strftime(exp_buf, sizeof(exp_buf), "%a, %d %b %Y %T %Z", std::gmtime(&exp));
++
++ if (len) {
++ int position = 0;
++ while (exp_buf[position] != 'U' && len - position > 3)
++ position++;
++
++ if (len - position == 3) {
++ char substr[4];
++ memcpy(substr, &exp_buf[position], 4);
++
++ if (strcmp(substr, "UTC") == 0)
++ memcpy(&exp_buf[position], "GMT", 3);
++ }
++
+ hdr = fmt::format("expiry-date=\"{0}\", rule-id=\"{1}\"", exp_buf,
+ *rule_id);
+ } else {
diff --git a/skip/ceph/12-package.json-resolutions.patch b/skip/ceph/12-package.json-resolutions.patch
new file mode 100644
index 0000000..ddc4ea2
--- /dev/null
+++ b/skip/ceph/12-package.json-resolutions.patch
@@ -0,0 +1,31 @@
+--- a/src/pybind/mgr/dashboard/CMakeLists.txt
++++ b/src/pybind/mgr/dashboard/CMakeLists.txt
+@@ -76,7 +76,7 @@
+
+ add_npm_command(
+ OUTPUT "${CMAKE_SOURCE_DIR}/src/pybind/mgr/dashboard/frontend/node_modules"
+- COMMAND CYPRESS_CACHE_FOLDER=${CMAKE_SOURCE_DIR}/build/src/pybind/mgr/dashboard/cypress NG_CLI_ANALYTICS=false npm ci ${mgr-dashboard-userconfig}
++ COMMAND CYPRESS_CACHE_FOLDER=${CMAKE_SOURCE_DIR}/build/src/pybind/mgr/dashboard/cypress NG_CLI_ANALYTICS=false yarn install --network-timeout 600000 --frozen-lockfile ${mgr-dashboard-userconfig}
+ DEPENDS frontend/package.json
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/pybind/mgr/dashboard/frontend
+ COMMENT "dashboard frontend dependencies are being installed"
+@@ -119,7 +119,7 @@
+
+ add_npm_command(
+ OUTPUT "${CMAKE_SOURCE_DIR}/src/pybind/mgr/dashboard/frontend/dist"
+- COMMAND DASHBOARD_FRONTEND_LANGS="${DASHBOARD_FRONTEND_LANGS}" npm run build:localize -- ${npm_args}
++ COMMAND DASHBOARD_FRONTEND_LANGS="${DASHBOARD_FRONTEND_LANGS}" yarn run build:localize ${npm_args}
+ DEPENDS ${frontend_src} frontend/node_modules
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/pybind/mgr/dashboard/frontend
+ COMMENT "dashboard frontend is being created"
+--- a/src/pybind/mgr/dashboard/frontend/package.json
++++ b/src/pybind/mgr/dashboard/frontend/package.json
+@@ -142,8 +142,5 @@
+ "ts-node": "9.0.0",
+ "tslint": "6.1.3",
+ "typescript": "4.1.6"
+- },
+- "resolutions": {
+- "fsevents": "2.1.3"
+ }
+ }
diff --git a/skip/ceph/20-pci.patch b/skip/ceph/20-pci.patch
new file mode 100644
index 0000000..c16e276
--- /dev/null
+++ b/skip/ceph/20-pci.patch
@@ -0,0 +1,63 @@
+Musl patch for pci
+
+diff -Nurp a/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c
+--- a/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c 2020-11-21 13:07:44.255206657 +0000
++++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c 2020-11-21 13:04:06.488285583 +0000
+@@ -14,6 +14,32 @@
+
+ #if defined(RTE_ARCH_X86)
+ #include <sys/io.h>
++#if defined(__GLIBC__)
++#define pci_uio_outl_p outl_p
++#define pci_uio_outw_p outw_p
++#define pci_uio_outb_p outb_p
++#else
++static inline void
++pci_uio_outl_p(unsigned int value, unsigned short int port)
++{
++ __asm__ __volatile__ ("outl %0,%w1\noutb %%al,$0x80" : : "a" (value),
++ "Nd" (port));
++}
++
++static inline void
++pci_uio_outw_p(unsigned short int value, unsigned short int port)
++{
++ __asm__ __volatile__ ("outw %w0,%w1\noutb %%al,$0x80" : : "a" (value),
++ "Nd" (port));
++}
++
++static inline void
++pci_uio_outb_p(unsigned char value, unsigned short int port)
++{
++ __asm__ __volatile__ ("outb %b0,%w1\noutb %%al,$0x80" : : "a" (value),
++ "Nd" (port));
++}
++#endif
+ #endif
+
+ #include <rte_string_fns.h>
+@@ -528,21 +554,21 @@ pci_uio_ioport_write(struct rte_pci_iopo
+ if (len >= 4) {
+ size = 4;
+ #if defined(RTE_ARCH_X86)
+- outl_p(*(const uint32_t *)s, reg);
++ pci_uio_outl_p(*(const uint32_t *)s, reg);
+ #else
+ *(volatile uint32_t *)reg = *(const uint32_t *)s;
+ #endif
+ } else if (len >= 2) {
+ size = 2;
+ #if defined(RTE_ARCH_X86)
+- outw_p(*(const uint16_t *)s, reg);
++ pci_uio_outw_p(*(const uint16_t *)s, reg);
+ #else
+ *(volatile uint16_t *)reg = *(const uint16_t *)s;
+ #endif
+ } else {
+ size = 1;
+ #if defined(RTE_ARCH_X86)
+- outb_p(*s, reg);
++ pci_uio_outb_p(*s, reg);
+ #else
+ *(volatile uint8_t *)reg = *s;
+ #endif
diff --git a/skip/ceph/30-32bit_fix.patch.noauto b/skip/ceph/30-32bit_fix.patch.noauto
new file mode 100644
index 0000000..aba21d6
--- /dev/null
+++ b/skip/ceph/30-32bit_fix.patch.noauto
@@ -0,0 +1,110 @@
+32-bit specific patches
+
+diff -uNr ceph-15.2.4/src/client/Client.cc ceph-15.2.4-arm32_fix/src/client/Client.cc
+--- ceph-15.2.4/src/client/Client.cc 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-arm32_fix/src/client/Client.cc 2020-11-21 22:11:16.061796876 +1030
+@@ -10948,7 +10948,7 @@
+ ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " -> " << in->ll_ref << dendl;
+ }
+
+-int Client::_ll_put(Inode *in, uint64_t num)
++int Client::_ll_put(Inode *in, size_t num)
+ {
+ in->ll_put(num);
+ ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " " << num << " -> " << in->ll_ref << dendl;
+@@ -10989,7 +10989,7 @@
+ }
+ }
+
+-bool Client::_ll_forget(Inode *in, uint64_t count)
++bool Client::_ll_forget(Inode *in, size_t count)
+ {
+ inodeno_t ino = in->ino;
+
+@@ -11018,7 +11018,7 @@
+ return last;
+ }
+
+-bool Client::ll_forget(Inode *in, uint64_t count)
++bool Client::ll_forget(Inode *in, size_t count)
+ {
+ std::lock_guard lock(client_lock);
+ return _ll_forget(in, count);
+diff -uNr ceph-15.2.4/src/mds/PurgeQueue.h ceph-15.2.4-arm32_fix/src/mds/PurgeQueue.h
+--- ceph-15.2.4/src/mds/PurgeQueue.h 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-arm32_fix/src/mds/PurgeQueue.h 2020-11-21 22:11:16.065796889 +1030
+@@ -219,6 +219,6 @@
+ size_t purge_item_journal_size;
+
+ uint64_t ops_high_water = 0;
+- uint64_t files_high_water = 0;
++ size_t files_high_water = 0;
+ };
+ #endif
+diff -uNr ceph-15.2.4/src/test/common/test_json_formattable.cc ceph-15.2.4-arm32_fix/src/test/common/test_json_formattable.cc
+--- ceph-15.2.4/src/test/common/test_json_formattable.cc 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-arm32_fix/src/test/common/test_json_formattable.cc 2020-11-21 22:11:16.065796889 +1030
+@@ -371,7 +371,7 @@
+
+ struct2() {
+ void *p = (void *)this;
+- long i = (long)p;
++ unsigned long i = (unsigned long)p;
+ v.resize((i >> 16) % 16 + 1);
+ }
+
+diff -uNr ceph-15.2.4/src/test/libcephfs/ceph_pthread_self.h ceph-15.2.4-arm32_fix/src/test/libcephfs/ceph_pthread_self.h
+--- ceph-15.2.4/src/test/libcephfs/ceph_pthread_self.h 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-arm32_fix/src/test/libcephfs/ceph_pthread_self.h 2020-11-21 22:11:16.066796893 +1030
+@@ -25,7 +25,7 @@
+ static_assert(std::is_convertible_v<decltype(me), uint64_t> ||
+ std::is_pointer_v<decltype(me)>,
+ "we need to use pthread_self() for the owner parameter");
+- return reinterpret_cast<uint64_t>(me);
++ return reinterpret_cast<uint64_t>((uint64_t) me);
+ }
+
+ #endif
+diff -uNr ceph-15.2.4/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc ceph-15.2.4-arm32_fix/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc
+--- ceph-15.2.4/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-arm32_fix/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc 2020-11-21 22:11:16.066796893 +1030
+@@ -162,7 +162,7 @@
+ int r) {
+ bufferlist bl;
+ encode(last_image_id, bl);
+- encode(static_cast<size_t>(1024), bl);
++ encode(static_cast<uint64_t>(1024), bl);
+
+ bufferlist out_bl;
+ encode(images, out_bl);
+diff -Nurp a/src/client/Client.h b/src/client/Client.h
+--- a/src/client/Client.h
++++ b/src/client/Client.h
+@@ -525,7 +525,7 @@
+ int ll_lookupx(Inode *parent, const char *name, Inode **out,
+ struct ceph_statx *stx, unsigned want, unsigned flags,
+ const UserPerm& perms);
+- bool ll_forget(Inode *in, uint64_t count);
++ bool ll_forget(Inode *in, size_t count);
+ bool ll_put(Inode *in);
+ int ll_get_snap_ref(snapid_t snap);
+
+@@ -1241,7 +1241,7 @@
+ void _fragmap_remove_stopped_mds(Inode *in, mds_rank_t mds);
+
+ void _ll_get(Inode *in);
+- int _ll_put(Inode *in, uint64_t num);
++ int _ll_put(Inode *in, size_t num);
+ void _ll_drop_pins();
+
+ Fh *_create_fh(Inode *in, int flags, int cmode, const UserPerm& perms);
+@@ -1405,7 +1405,7 @@
+ int _lookup_parent(Inode *in, const UserPerm& perms, Inode **parent=NULL);
+ int _lookup_name(Inode *in, Inode *parent, const UserPerm& perms);
+ int _lookup_vino(vinodeno_t ino, const UserPerm& perms, Inode **inode=NULL);
+- bool _ll_forget(Inode *in, uint64_t count);
++ bool _ll_forget(Inode *in, size_t count);
+
+ void collect_and_send_metrics();
+ void collect_and_send_global_metrics();
+
diff --git a/skip/ceph/30-cypress.patch.noauto b/skip/ceph/30-cypress.patch.noauto
new file mode 100644
index 0000000..fecf055
--- /dev/null
+++ b/skip/ceph/30-cypress.patch.noauto
@@ -0,0 +1,14 @@
+remove cypress as it is not available on 32-bit platforms
+
+diff -Nurp a/src/pybind/mgr/dashboard/frontend/package.json b/src/pybind/mgr/dashboard/frontend/package.json
+--- a/src/pybind/mgr/dashboard/frontend/package.json 2021-04-03 08:58:07.611941559 +0100
++++ b/src/pybind/mgr/dashboard/frontend/package.json 2021-04-03 08:59:13.903122038 +0100
+@@ -119,8 +119,6 @@
+ "@types/node": "12.12.62",
+ "@types/simplebar": "5.1.1",
+ "codelyzer": "6.0.1",
+- "cypress": "5.3.0",
+- "cypress-multi-reporters": "1.4.0",
+ "html-linter": "1.1.1",
+ "htmllint-cli": "0.0.7",
+ "identity-obj-proxy": "3.0.0",
diff --git a/skip/ceph/30-ubuntu-32bit-fixes.patch.noauto b/skip/ceph/30-ubuntu-32bit-fixes.patch.noauto
new file mode 100644
index 0000000..9b9318d
--- /dev/null
+++ b/skip/ceph/30-ubuntu-32bit-fixes.patch.noauto
@@ -0,0 +1,137 @@
+Description: Misc fixes for 32 bit architecture builds.
+Author: James Page <james.page@ubuntu.com>
+Forwarded: no
+
+--- a/src/tools/rbd_mirror/image_replayer/snapshot/Replayer.cc
++++ b/src/tools/rbd_mirror/image_replayer/snapshot/Replayer.cc
+@@ -235,7 +235,8 @@ bool Replayer<I>::get_replay_status(std:
+
+ json_spirit::mObject root_obj;
+ root_obj["replay_state"] = replay_state;
+- root_obj["remote_snapshot_timestamp"] = remote_snap_info->timestamp.sec();
++ root_obj["remote_snapshot_timestamp"] = static_cast<uint64_t>(
++ remote_snap_info->timestamp.sec());
+
+ auto matching_remote_snap_id = util::compute_remote_snap_id(
+ m_state_builder->local_image_ctx->image_lock,
+@@ -249,8 +250,8 @@ bool Replayer<I>::get_replay_status(std:
+ // use the timestamp from the matching remote image since
+ // the local snapshot would just be the time the snapshot was
+ // synced and not the consistency point in time.
+- root_obj["local_snapshot_timestamp"] =
+- matching_remote_snap_it->second.timestamp.sec();
++ root_obj["local_snapshot_timestamp"] = static_cast<uint64_t>(
++ matching_remote_snap_it->second.timestamp.sec());
+ }
+
+ matching_remote_snap_it = m_state_builder->remote_image_ctx->snap_info.find(
+@@ -258,7 +259,8 @@ bool Replayer<I>::get_replay_status(std:
+ if (m_remote_snap_id_end != CEPH_NOSNAP &&
+ matching_remote_snap_it !=
+ m_state_builder->remote_image_ctx->snap_info.end()) {
+- root_obj["syncing_snapshot_timestamp"] = remote_snap_info->timestamp.sec();
++ root_obj["syncing_snapshot_timestamp"] = static_cast<uint64_t>(
++ remote_snap_info->timestamp.sec());
+ root_obj["syncing_percent"] = static_cast<uint64_t>(
+ 100 * m_local_mirror_snap_ns.last_copied_object_number /
+ static_cast<float>(std::max<uint64_t>(1U, m_local_object_count)));
+--- a/src/s3select/include/s3select_functions.h
++++ b/src/s3select/include/s3select_functions.h
+@@ -585,7 +585,7 @@ struct _fn_diff_timestamp : public base_
+ {
+ boost::gregorian::date_period dp =
+ boost::gregorian::date_period( val_dt1.timestamp()->date(), val_dt2.timestamp()->date());
+- result->set_value( dp.length().days() );
++ result->set_value( (int64_t)dp.length().days() );
+ }
+ else if (strcmp(val_date_part.str(), "hours") == 0)
+ {
+--- a/src/os/bluestore/BlueFS.cc
++++ b/src/os/bluestore/BlueFS.cc
+@@ -3744,11 +3744,11 @@ int BlueFS::do_replay_recovery_read(File
+
+ size_t BlueFS::probe_alloc_avail(int dev, uint64_t alloc_size)
+ {
+- size_t total = 0;
+- auto iterated_allocation = [&](size_t off, size_t len) {
++ uint64_t total = 0;
++ auto iterated_allocation = [&](uint64_t off, uint64_t len) {
+ //only count in size that is alloc_size aligned
+- size_t dist_to_alignment;
+- size_t offset_in_block = off & (alloc_size - 1);
++ uint64_t dist_to_alignment;
++ uint64_t offset_in_block = off & (alloc_size - 1);
+ if (offset_in_block == 0)
+ dist_to_alignment = 0;
+ else
+--- a/src/tools/neorados.cc
++++ b/src/tools/neorados.cc
+@@ -146,7 +146,7 @@ void create(R::RADOS& r, const std::vect
+ obj, pname));
+ }
+
+-inline constexpr std::size_t io_size = 4 << 20;
++inline constexpr std::uint64_t io_size = 4 << 20;
+
+ void write(R::RADOS& r, const std::vector<std::string>& p, s::yield_context y)
+ {
+@@ -156,7 +156,7 @@ void write(R::RADOS& r, const std::vecto
+
+ bs::error_code ec;
+ std::unique_ptr<char[]> buf = std::make_unique<char[]>(io_size);
+- std::size_t off = 0;
++ std::uint64_t off = 0;
+ boost::io::ios_exception_saver ies(std::cin);
+
+ std::cin.exceptions(std::istream::badbit);
+@@ -203,7 +203,7 @@ void read(R::RADOS& r, const std::vector
+ obj, pname));
+ }
+
+- std::size_t off = 0;
++ std::uint64_t off = 0;
+ ceph::buffer::list bl;
+ while (auto toread = std::max(len - off, io_size)) {
+ R::ReadOp op;
+--- a/src/tools/cephfs_mirror/FSMirror.cc
++++ b/src/tools/cephfs_mirror/FSMirror.cc
+@@ -334,7 +334,7 @@ void FSMirror::handle_acquire_directory(
+ std::scoped_lock locker(m_lock);
+ m_directories.emplace(dir_path);
+ m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
+- m_directories.size());
++ static_cast<uint64_t>(m_directories.size()));
+
+ for (auto &[peer, peer_replayer] : m_peer_replayers) {
+ dout(10) << ": peer=" << peer << dendl;
+@@ -352,7 +352,7 @@ void FSMirror::handle_release_directory(
+ if (it != m_directories.end()) {
+ m_directories.erase(it);
+ m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
+- m_directories.size());
++ static_cast<uint64_t>(m_directories.size()));
+ for (auto &[peer, peer_replayer] : m_peer_replayers) {
+ dout(10) << ": peer=" << peer << dendl;
+ peer_replayer->remove_directory(dir_path);
+--- a/src/librbd/object_map/DiffRequest.cc
++++ b/src/librbd/object_map/DiffRequest.cc
+@@ -175,7 +175,7 @@ void DiffRequest<I>::handle_load_object_
+ m_object_map.resize(num_objs);
+ }
+
+- size_t prev_object_diff_state_size = m_object_diff_state->size();
++ uint64_t prev_object_diff_state_size = m_object_diff_state->size();
+ if (prev_object_diff_state_size < num_objs) {
+ // the diff state should be the largest of all snapshots in the set
+ m_object_diff_state->resize(num_objs);
+--- a/src/SimpleRADOSStriper.cc
++++ b/src/SimpleRADOSStriper.cc
+@@ -140,7 +140,7 @@ int SimpleRADOSStriper::remove()
+ return 0;
+ }
+
+-int SimpleRADOSStriper::truncate(uint64_t size)
++int SimpleRADOSStriper::truncate(size_t size)
+ {
+ d(5) << size << dendl;
+
diff --git a/skip/ceph/31-32bit_fix_tests.patch.noauto b/skip/ceph/31-32bit_fix_tests.patch.noauto
new file mode 100644
index 0000000..939c550
--- /dev/null
+++ b/skip/ceph/31-32bit_fix_tests.patch.noauto
@@ -0,0 +1,66 @@
+--- a/src/test/objectstore/test_bdev.cc
++++ b/src/test/objectstore/test_bdev.cc
+@@ -54,8 +54,8 @@
+ BlockDevice::create(g_ceph_context, bdev.path, NULL, NULL,
+ [](void* handle, void* aio) {}, NULL));
+ bufferlist bl;
+- // writing a bit less than 4GB
+- for (auto i = 0; i < 4000; i++) {
++ // writing a bit less than 1GB
++ for (auto i = 0; i < 1000; i++) {
+ string s(1048576, 'a' + (i % 28));
+ bl.append(s);
+ }
+--- a/src/test/objectstore/test_bluefs.cc
++++ b/src/test/objectstore/test_bluefs.cc
+@@ -237,7 +237,7 @@
+ }
+
+ TEST(BlueFS, very_large_write) {
+- // we'll write a ~5G file, so allocate more than that for the whole fs
++ // we'll write a ~1G file, so allocate more than that for the whole fs
+ uint64_t size = 1048576 * 1024 * 6ull;
+ TempBdev bdev{size};
+ BlueFS fs(g_ceph_context);
+@@ -260,12 +260,12 @@
+ BlueFS::FileWriter *h;
+ ASSERT_EQ(0, fs.mkdir("dir"));
+ ASSERT_EQ(0, fs.open_for_write("dir", "bigfile", &h, false));
+- for (unsigned i = 0; i < 3*1024*1048576ull / sizeof(buf); ++i) {
++ for (unsigned i = 0; i < 1*1024*1048576ull / sizeof(buf); ++i) {
+ h->append(buf, sizeof(buf));
+ total_written += sizeof(buf);
+ }
+ fs.fsync(h);
+- for (unsigned i = 0; i < 2*1024*1048576ull / sizeof(buf); ++i) {
++ for (unsigned i = 0; i < 1*1024*1048576ull / sizeof(buf); ++i) {
+ h->append(buf, sizeof(buf));
+ total_written += sizeof(buf);
+ }
+@@ -278,7 +278,7 @@
+ bufferlist bl;
+ BlueFS::FileReaderBuffer readbuf(10485760);
+ ASSERT_EQ(h->file->fnode.size, total_written);
+- for (unsigned i = 0; i < 3*1024*1048576ull / sizeof(buf); ++i) {
++ for (unsigned i = 0; i < 1*1024*1048576ull / sizeof(buf); ++i) {
+ bl.clear();
+ fs.read(h, &readbuf, i * sizeof(buf), sizeof(buf), &bl, NULL);
+ int r = memcmp(buf, bl.c_str(), sizeof(buf));
+@@ -288,7 +288,7 @@
+ }
+ ASSERT_EQ(0, r);
+ }
+- for (unsigned i = 0; i < 2*1024*1048576ull / sizeof(buf); ++i) {
++ for (unsigned i = 0; i < 1*1024*1048576ull / sizeof(buf); ++i) {
+ bl.clear();
+ fs.read(h, &readbuf, i * sizeof(buf), sizeof(buf), &bl, NULL);
+ int r = memcmp(buf, bl.c_str(), sizeof(buf));
+@@ -313,7 +313,7 @@
+ }
+
+ TEST(BlueFS, very_large_write2) {
+- // we'll write a ~5G file, so allocate more than that for the whole fs
++ // we'll write a ~1G file, so allocate more than that for the whole fs
+ uint64_t size_full = 1048576 * 1024 * 6ull;
+ uint64_t size = 1048576 * 1024 * 5ull;
+ TempBdev bdev{ size_full };
diff --git a/skip/ceph/32-PurgeQueue.cc-cast.patch b/skip/ceph/32-PurgeQueue.cc-cast.patch
new file mode 100644
index 0000000..19a0719
--- /dev/null
+++ b/skip/ceph/32-PurgeQueue.cc-cast.patch
@@ -0,0 +1,85 @@
+Submitted as: https://github.com/ceph/ceph/pull/41235
+
+commit 953e7dc0f911f84a4bb377aee45b22e2ffad6867
+Author: Duncan Bellamy <dunk@denkimushi.com>
+Date: Sat May 8 11:52:35 2021 +0100
+
+ mds: PurgeQueue.cc add static cast for 32bit compilation
+
+ files_high_water is defined as uint64_t but when compiling on 32bit these max functions
+ fail as they are both not considered uint64_t by gcc 10 even though they are
+
+ files_high_water = std::max(files_high_water,
+ static_cast<uint64_t>(in_flight.size()));
+
+ Fixes: https://tracker.ceph.com/issues/50707
+
+ Signed-off-by: Duncan Bellamy <dunk@denkimushi.com>
+
+diff --git a/src/mds/PurgeQueue.cc b/src/mds/PurgeQueue.cc
+index 977be2c118..3104a3ccc4 100644
+--- a/src/mds/PurgeQueue.cc
++++ b/src/mds/PurgeQueue.cc
+@@ -7,9 +7,9 @@
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+- * License version 2.1, as published by the Free Software
++ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+- *
++ *
+ */
+
+ #include "common/debug.h"
+@@ -594,8 +594,8 @@ void PurgeQueue::_execute_item(
+
+ in_flight[expire_to] = item;
+ logger->set(l_pq_executing, in_flight.size());
+- files_high_water = std::max(files_high_water,
+- static_cast<uint64_t>(in_flight.size()));
++ files_high_water = std::max<uint64_t>(files_high_water,
++ in_flight.size());
+ logger->set(l_pq_executing_high_water, files_high_water);
+ auto ops = _calculate_ops(item);
+ ops_in_flight += ops;
+@@ -662,8 +662,8 @@ void PurgeQueue::_execute_item(
+ logger->set(l_pq_executing_ops_high_water, ops_high_water);
+ in_flight.erase(expire_to);
+ logger->set(l_pq_executing, in_flight.size());
+- files_high_water = std::max(files_high_water,
+- static_cast<uint64_t>(in_flight.size()));
++ files_high_water = std::max<uint64_t>(files_high_water,
++ in_flight.size());
+ logger->set(l_pq_executing_high_water, files_high_water);
+ return;
+ }
+@@ -716,19 +716,19 @@ void PurgeQueue::_execute_item_complete(
+
+ in_flight.erase(iter);
+ logger->set(l_pq_executing, in_flight.size());
+- files_high_water = std::max(files_high_water,
+- static_cast<uint64_t>(in_flight.size()));
++ files_high_water = std::max<uint64_t>(files_high_water,
++ in_flight.size());
+ logger->set(l_pq_executing_high_water, files_high_water);
+ dout(10) << "in_flight.size() now " << in_flight.size() << dendl;
+
+- uint64_t write_pos = journaler.get_write_pos();
+- uint64_t read_pos = journaler.get_read_pos();
+- uint64_t expire_pos = journaler.get_expire_pos();
+- uint64_t item_num = (write_pos - (in_flight.size() ? expire_pos : read_pos))
++ uint64_t write_pos = journaler.get_write_pos();
++ uint64_t read_pos = journaler.get_read_pos();
++ uint64_t expire_pos = journaler.get_expire_pos();
++ uint64_t item_num = (write_pos - (in_flight.size() ? expire_pos : read_pos))
+ / purge_item_journal_size;
+- dout(10) << "left purge items in journal: " << item_num
+- << " (purge_item_journal_size/write_pos/read_pos/expire_pos) now at "
+- << "(" << purge_item_journal_size << "/" << write_pos << "/" << read_pos
++ dout(10) << "left purge items in journal: " << item_num
++ << " (purge_item_journal_size/write_pos/read_pos/expire_pos) now at "
++ << "(" << purge_item_journal_size << "/" << write_pos << "/" << read_pos
+ << "/" << expire_pos << ")" << dendl;
+
+ logger->set(l_pq_item_in_journal, item_num);
diff --git a/skip/ceph/32-upstream32bit.patch b/skip/ceph/32-upstream32bit.patch
new file mode 100644
index 0000000..917cd03
--- /dev/null
+++ b/skip/ceph/32-upstream32bit.patch
@@ -0,0 +1,92 @@
+commit 72a5993da70955182a73755ddba35005a6d9fc11
+Author: Kefu Chai <kchai@redhat.com>
+Date: Tue Apr 27 18:24:24 2021 +0800
+
+ cls/rbd, librbd: use uint64_t for sparse_size
+
+ the size of `size_t` is varies from architecture to architecture. the
+ C++ standard only requires it to be able to represent the maximum possible
+ size of object of any type. on 32-bit architectures, it's very likely a
+ 32-bit unsigned integer. to ensure the interoperability between the
+ 64-bit systems and 32-bit systems, we should use a type with explicitly
+ defined size.
+
+ also, we don't define the dencoder for size_t. so on systems where
+ size_t is not backed by uint32_t or uint64_t, the tree does not compile.
+
+ in this change, use uint64_t for sparse_size. and leave
+ `C_SparsifyObject::m_sparse_size` intact. as the latter should be able
+ to be promoted to uint64_t when necessary.
+
+ this change is backward compatible on 64-bit systems.
+
+ Signed-off-by: Kefu Chai <kchai@redhat.com>
+
+diff --git a/src/cls/rbd/cls_rbd.cc b/src/cls/rbd/cls_rbd.cc
+index 33910b7df5..e3e05d85ed 100644
+--- a/src/cls/rbd/cls_rbd.cc
++++ b/src/cls/rbd/cls_rbd.cc
+@@ -7996,7 +7996,7 @@ int namespace_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+ */
+ int sparsify(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
+ {
+- size_t sparse_size;
++ uint64_t sparse_size;
+ bool remove_empty;
+ try {
+ auto iter = in->cbegin();
+diff --git a/src/cls/rbd/cls_rbd_client.cc b/src/cls/rbd/cls_rbd_client.cc
+index cefa1fed79..fee3ac8923 100644
+--- a/src/cls/rbd/cls_rbd_client.cc
++++ b/src/cls/rbd/cls_rbd_client.cc
+@@ -2974,7 +2974,7 @@ int namespace_list(librados::IoCtx *ioctx,
+ return namespace_list_finish(&iter, entries);
+ }
+
+-void sparsify(librados::ObjectWriteOperation *op, size_t sparse_size,
++void sparsify(librados::ObjectWriteOperation *op, uint64_t sparse_size,
+ bool remove_empty)
+ {
+ bufferlist bl;
+@@ -2983,7 +2983,7 @@ void sparsify(librados::ObjectWriteOperation *op, size_t sparse_size,
+ op->exec("rbd", "sparsify", bl);
+ }
+
+-int sparsify(librados::IoCtx *ioctx, const std::string &oid, size_t sparse_size,
++int sparsify(librados::IoCtx *ioctx, const std::string &oid, uint64_t sparse_size,
+ bool remove_empty)
+ {
+ librados::ObjectWriteOperation op;
+diff --git a/src/cls/rbd/cls_rbd_client.h b/src/cls/rbd/cls_rbd_client.h
+index 12b34c4832..ef2b05fd84 100644
+--- a/src/cls/rbd/cls_rbd_client.h
++++ b/src/cls/rbd/cls_rbd_client.h
+@@ -652,9 +652,9 @@ int sparse_copyup(librados::IoCtx *ioctx, const std::string &oid,
+ const std::map<uint64_t, uint64_t> &extent_map,
+ ceph::buffer::list data);
+
+-void sparsify(librados::ObjectWriteOperation *op, size_t sparse_size,
++void sparsify(librados::ObjectWriteOperation *op, uint64_t sparse_size,
+ bool remove_empty);
+-int sparsify(librados::IoCtx *ioctx, const std::string &oid, size_t sparse_size,
++int sparsify(librados::IoCtx *ioctx, const std::string &oid, uint64_t sparse_size,
+ bool remove_empty);
+
+ } // namespace cls_client
+diff --git a/src/librbd/WatchNotifyTypes.h b/src/librbd/WatchNotifyTypes.h
+index ca0b40f28f..4fad31ffac 100644
+--- a/src/librbd/WatchNotifyTypes.h
++++ b/src/librbd/WatchNotifyTypes.h
+@@ -410,10 +410,10 @@ struct MigratePayload : public AsyncRequestPayloadBase {
+ };
+
+ struct SparsifyPayload : public AsyncRequestPayloadBase {
+- size_t sparse_size = 0;
++ uint64_t sparse_size = 0;
+
+ SparsifyPayload() {}
+- SparsifyPayload(const AsyncRequestId &id, size_t sparse_size)
++ SparsifyPayload(const AsyncRequestId &id, uint64_t sparse_size)
+ : AsyncRequestPayloadBase(id), sparse_size(sparse_size) {
+ }
+
diff --git a/skip/ceph/32-upstream32bitcleanup.patch b/skip/ceph/32-upstream32bitcleanup.patch
new file mode 100644
index 0000000..1fe036e
--- /dev/null
+++ b/skip/ceph/32-upstream32bitcleanup.patch
@@ -0,0 +1,143 @@
+submitted as https://github.com/ceph/ceph/pull/41239
+
+commit 558adef26a2149b0dd644a2c9a7e2db8d370b556
+Author: Kefu Chai <kchai@redhat.com>
+Date: Sat May 8 21:02:54 2021 +0800
+
+ librbd/deep_copy: cast uint64_t to size_t for constructing SparseBufferlistExtent
+
+ SparseBufferlistExtent's ctor accepts size_t, so, on a 32-bit platform,
+ the parameter would be narrowed before passing to the ctor, and GCC
+ complains at seeing this:
+
+ /builds/a16bitsysop/aports/community/ceph/src/ceph-16.2.3/src/librbd/deep_copy/ObjectCopyRequest.cc:789:60: warning: narrowing conversion of 'object_extent.striper::LightweightObjectExtent::length'
+ from 'uint64_t' {aka 'long long unsigned int'} to 'size_t' {aka 'unsigned int'} [-Wnarrowing]
+ 789 | {io::SPARSE_EXTENT_STATE_ZEROED, object_extent.length});
+ | ~~~~~~~~~~~~~~^~~~~~
+
+ this change make this cast explicit and silences the warning.
+
+ Signed-off-by: Kefu Chai <kchai@redhat.com>
+
+diff --git a/src/librbd/deep_copy/ObjectCopyRequest.cc b/src/librbd/deep_copy/ObjectCopyRequest.cc
+index e86ed5ea1c..efc6749536 100644
+--- a/src/librbd/deep_copy/ObjectCopyRequest.cc
++++ b/src/librbd/deep_copy/ObjectCopyRequest.cc
+@@ -614,7 +614,8 @@ void ObjectCopyRequest<I>::merge_write_ops() {
+
+ m_snapshot_sparse_bufferlist[src_snap_seq].insert(
+ object_extent.offset, object_extent.length,
+- {io::SPARSE_EXTENT_STATE_DATA, object_extent.length,\
++ {io::SPARSE_EXTENT_STATE_DATA,
++ static_cast<size_t>(object_extent.length),
+ std::move(sub_bl)});
+
+ buffer_offset += object_extent.length;
+diff --git a/src/librbd/io/CopyupRequest.cc b/src/librbd/io/CopyupRequest.cc
+index d70851409f..a3af713151 100644
+--- a/src/librbd/io/CopyupRequest.cc
++++ b/src/librbd/io/CopyupRequest.cc
+@@ -711,7 +711,9 @@ int CopyupRequest<I>::prepare_copyup_data() {
+
+ sparse_bufferlist.insert(
+ object_offset, object_length,
+- {SPARSE_EXTENT_STATE_DATA, object_length, std::move(sub_bl)});
++ {SPARSE_EXTENT_STATE_DATA,
++ static_cast<size_t>(object_length),
++ std::move(sub_bl)});
+ }
+ } else {
+ // copyup that will concurrently written to the HEAD revision with the
+commit 130fdf7bcfd2b4c5a5b34809952b69b70e9c11a4
+Author: Kefu Chai <kchai@redhat.com>
+Date: Sat May 8 20:59:07 2021 +0800
+
+ mgr/PyModule: use Py_ssize_t for the PyList index
+
+ also silences the warnings like:
+
+ mgr/PyModule.cc:574:30: warning: comparison of integer expressions of different signedness: 'unsigned int' and 'Py_ssize_t' {aka 'int'} [-Wsign-compare]
+ 574 | for (unsigned i = 0; i < PyList_Size(p); ++i) {
+ | ~~^~~~~~~~~~~~~~~~
+
+ Signed-off-by: Kefu Chai <kchai@redhat.com>
+
+diff --git a/src/mgr/PyModule.cc b/src/mgr/PyModule.cc
+index 28c76fe7ed..ff1ff85e7e 100644
+--- a/src/mgr/PyModule.cc
++++ b/src/mgr/PyModule.cc
+@@ -562,7 +562,7 @@ int PyModule::load_options()
+ }
+ p = PyDict_GetItemString(pOption, "enum_allowed");
+ if (p && PyObject_TypeCheck(p, &PyList_Type)) {
+- for (unsigned i = 0; i < PyList_Size(p); ++i) {
++ for (Py_ssize_t i = 0; i < PyList_Size(p); ++i) {
+ auto q = PyList_GetItem(p, i);
+ if (q) {
+ auto r = PyObject_Str(q);
+@@ -573,7 +573,7 @@ int PyModule::load_options()
+ }
+ p = PyDict_GetItemString(pOption, "see_also");
+ if (p && PyObject_TypeCheck(p, &PyList_Type)) {
+- for (unsigned i = 0; i < PyList_Size(p); ++i) {
++ for (Py_ssize_t i = 0; i < PyList_Size(p); ++i) {
+ auto q = PyList_GetItem(p, i);
+ if (q && PyObject_TypeCheck(q, &PyUnicode_Type)) {
+ option.see_also.insert(PyUnicode_AsUTF8(q));
+@@ -582,7 +582,7 @@ int PyModule::load_options()
+ }
+ p = PyDict_GetItemString(pOption, "tags");
+ if (p && PyObject_TypeCheck(p, &PyList_Type)) {
+- for (unsigned i = 0; i < PyList_Size(p); ++i) {
++ for (Py_ssize_t i = 0; i < PyList_Size(p); ++i) {
+ auto q = PyList_GetItem(p, i);
+ if (q && PyObject_TypeCheck(q, &PyUnicode_Type)) {
+ option.tags.insert(PyUnicode_AsUTF8(q));
+commit 3bf4b32c9bd15652b24bc4b8c8ea07fb6bb04357
+Author: Kefu Chai <kchai@redhat.com>
+Date: Sat May 8 20:51:19 2021 +0800
+
+ os/bluestore: print size_t using %xz
+
+ we cannot assume that size_t is an alias of "long"
+
+ Signed-off-by: Kefu Chai <kchai@redhat.com>
+
+diff --git a/src/os/bluestore/Allocator.cc b/src/os/bluestore/Allocator.cc
+index 75f3172ca5..3428545414 100644
+--- a/src/os/bluestore/Allocator.cc
++++ b/src/os/bluestore/Allocator.cc
+@@ -81,8 +81,8 @@ public:
+ f->open_object_section("free");
+ char off_hex[30];
+ char len_hex[30];
+- snprintf(off_hex, sizeof(off_hex) - 1, "0x%lx", off);
+- snprintf(len_hex, sizeof(len_hex) - 1, "0x%lx", len);
++ snprintf(off_hex, sizeof(off_hex) - 1, "0x%zx", off);
++ snprintf(len_hex, sizeof(len_hex) - 1, "0x%zx", len);
+ f->dump_string("offset", off_hex);
+ f->dump_string("length", len_hex);
+ f->close_section();
+commit 3af466ee84209896f8671046c837350e736f15de
+Author: Kefu Chai <kchai@redhat.com>
+Date: Sat May 8 20:50:08 2021 +0800
+
+ client: print int64_t using PRId64
+
+ we cannot assume that int64_t is an alias of "long"
+
+ Signed-off-by: Kefu Chai <kchai@redhat.com>
+
+diff --git a/src/client/Client.cc b/src/client/Client.cc
+index acdd8f0934..7352824f6c 100644
+--- a/src/client/Client.cc
++++ b/src/client/Client.cc
+@@ -12772,7 +12772,7 @@ size_t Client::_vxattrcb_cluster_fsid(Inode *in, char *val, size_t size)
+ size_t Client::_vxattrcb_client_id(Inode *in, char *val, size_t size)
+ {
+ auto name = messenger->get_myname();
+- return snprintf(val, size, "%s%ld", name.type_str(), name.num());
++ return snprintf(val, size, "%s%" PRId64, name.type_str(), name.num());
+ }
+
+ #define CEPH_XATTR_NAME(_type, _name) "ceph." #_type "." #_name
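The last two commits above both come down to printf format portability; a minimal sketch of the same rule, using only standard C99 headers and nothing ceph-specific:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    size_t  off = 0x1000;  /* 32-bit on ILP32, 64-bit on LP64 */
    int64_t id  = 42;

    /* %zx/%zu always match size_t and PRId64 always matches int64_t;
     * hard-coding %lx or %ld only works where those types alias long. */
    printf("offset=0x%zx id=%" PRId64 "\n", off, id);
    return 0;
}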
diff --git a/skip/ceph/35-fix_ErasureCodeShec.patch b/skip/ceph/35-fix_ErasureCodeShec.patch
new file mode 100644
index 0000000..0ab2cc2
--- /dev/null
+++ b/skip/ceph/35-fix_ErasureCodeShec.patch
@@ -0,0 +1,17 @@
+patch by Vladimir Bashkirtsev
+
+diff -uNr ceph-15.2.4/src/erasure-code/shec/ErasureCodeShec.cc ceph-15.2.4-fix_ErasureCodeShec/src/erasure-code/shec/ErasureCodeShec.cc
+--- ceph-15.2.4/src/erasure-code/shec/ErasureCodeShec.cc 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_ErasureCodeShec/src/erasure-code/shec/ErasureCodeShec.cc 2020-11-14 00:46:20.029488684 +1030
+@@ -197,7 +197,10 @@
+ }
+ unsigned int k = get_data_chunk_count();
+ unsigned int m = get_chunk_count() - k;
+- unsigned blocksize = (*chunks.begin()).second.length();
++ unsigned blocksize = 0;
++ if (chunks.size() > 0) {
++ blocksize = (*chunks.begin()).second.length();
++ }
+ for (unsigned int i = 0; i < k + m; i++) {
+ if (chunks.find(i) == chunks.end()) {
+ bufferlist tmp;
diff --git a/skip/ceph/37-fix_tests.patch b/skip/ceph/37-fix_tests.patch
new file mode 100644
index 0000000..169f6ef
--- /dev/null
+++ b/skip/ceph/37-fix_tests.patch
@@ -0,0 +1,86 @@
+patch by Vladimir Bashkirtsev
+increase timeouts for armv7 ci
+
+diff -uNr ceph-15.2.4/src/test/CMakeLists.txt ceph-15.2.4-fix_tests/src/test/CMakeLists.txt
+--- ceph-15.2.4/src/test/CMakeLists.txt 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_tests/src/test/CMakeLists.txt 2020-11-08 17:37:15.788767448 +1030
+@@ -528,11 +528,17 @@
+ # Run rbd-unit-tests separate so they an run in parallel
+ # For values see: src/include/rbd/features.h
+ add_ceph_test(run-rbd-unit-tests-N.sh ${CMAKE_CURRENT_SOURCE_DIR}/run-rbd-unit-tests.sh N)
++ set_tests_properties(run-rbd-unit-tests-N.sh PROPERTIES TIMEOUT 7200)
+ add_ceph_test(run-rbd-unit-tests-0.sh ${CMAKE_CURRENT_SOURCE_DIR}/run-rbd-unit-tests.sh 0)
++ set_tests_properties(run-rbd-unit-tests-0.sh PROPERTIES TIMEOUT 7200)
+ add_ceph_test(run-rbd-unit-tests-1.sh ${CMAKE_CURRENT_SOURCE_DIR}/run-rbd-unit-tests.sh 1)
++ set_tests_properties(run-rbd-unit-tests-1.sh PROPERTIES TIMEOUT 7200)
+ add_ceph_test(run-rbd-unit-tests-61.sh ${CMAKE_CURRENT_SOURCE_DIR}/run-rbd-unit-tests.sh 61)
++ set_tests_properties(run-rbd-unit-tests-61.sh PROPERTIES TIMEOUT 7200)
+ add_ceph_test(run-rbd-unit-tests-109.sh ${CMAKE_CURRENT_SOURCE_DIR}/run-rbd-unit-tests.sh 109)
++ set_tests_properties(run-rbd-unit-tests-109.sh PROPERTIES TIMEOUT 7200)
+ add_ceph_test(run-rbd-unit-tests-127.sh ${CMAKE_CURRENT_SOURCE_DIR}/run-rbd-unit-tests.sh 127)
++ set_tests_properties(run-rbd-unit-tests-127.sh PROPERTIES TIMEOUT 7200)
+ if(FREEBSD)
+ add_ceph_test(rbd-ggate.sh ${CMAKE_CURRENT_SOURCE_DIR}/rbd-ggate.sh)
+ endif(FREEBSD)
+@@ -546,6 +552,7 @@
+ #add_ceph_test(test_pidfile.sh ${CMAKE_CURRENT_SOURCE_DIR}/test_pidfile.sh)
+
+ add_ceph_test(smoke.sh ${CMAKE_CURRENT_SOURCE_DIR}/smoke.sh)
++set_tests_properties(smoke.sh PROPERTIES TIMEOUT 14400)
+
+ set_property(
+ TEST ${tox_tests}
+diff -uNr ceph-15.2.4/src/test/encoding/CMakeLists.txt ceph-15.2.4-fix_tests/src/test/encoding/CMakeLists.txt
+--- ceph-15.2.4/src/test/encoding/CMakeLists.txt 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_tests/src/test/encoding/CMakeLists.txt 2020-11-08 17:37:15.789767451 +1030
+@@ -1,3 +1,5 @@
+ # scripts
+ add_ceph_test(check-generated.sh ${CMAKE_CURRENT_SOURCE_DIR}/check-generated.sh)
++set_tests_properties(check-generated.sh PROPERTIES TIMEOUT 18000)
+ add_ceph_test(readable.sh ${CMAKE_CURRENT_SOURCE_DIR}/readable.sh)
++set_tests_properties(readable.sh PROPERTIES TIMEOUT 18000)
+diff -uNr ceph-15.2.4/src/test/mgr/CMakeLists.txt ceph-15.2.4-fix_tests/src/test/mgr/CMakeLists.txt
+--- ceph-15.2.4/src/test/mgr/CMakeLists.txt 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_tests/src/test/mgr/CMakeLists.txt 2020-11-08 17:37:15.790767454 +1030
+@@ -9,6 +9,7 @@
+ if(WITH_MGR_DASHBOARD_FRONTEND)
+ if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm|ARM")
+ add_ceph_test(mgr-dashboard-frontend-unittests ${CMAKE_SOURCE_DIR}/src/pybind/mgr/dashboard/run-frontend-unittests.sh)
++ set_tests_properties(mgr-dashboard-frontend-unittests PROPERTIES TIMEOUT 72000)
+ endif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm|ARM")
+
+ add_ceph_test(mgr-dashboard-smoke.sh ${CMAKE_CURRENT_SOURCE_DIR}/mgr-dashboard-smoke.sh)
+diff -uNr ceph-15.2.4/src/test/objectstore/CMakeLists.txt ceph-15.2.4-fix_tests/src/test/objectstore/CMakeLists.txt
+--- ceph-15.2.4/src/test/objectstore/CMakeLists.txt 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_tests/src/test/objectstore/CMakeLists.txt 2020-11-08 17:37:15.791767457 +1030
+@@ -131,6 +131,7 @@
+ test_bluefs.cc
+ )
+ add_ceph_unittest(unittest_bluefs)
++ set_tests_properties(unittest_bluefs PROPERTIES TIMEOUT 7200)
+ target_link_libraries(unittest_bluefs os global)
+
+ # unittest_bluestore_types
+diff -uNr ceph-15.2.4/src/test/osd/CMakeLists.txt ceph-15.2.4-fix_tests/src/test/osd/CMakeLists.txt
+--- ceph-15.2.4/src/test/osd/CMakeLists.txt 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_tests/src/test/osd/CMakeLists.txt 2020-11-08 17:41:54.515606236 +1030
+@@ -35,6 +35,7 @@
+
+ # scripts
+ add_ceph_test(safe-to-destroy.sh ${CMAKE_CURRENT_SOURCE_DIR}/safe-to-destroy.sh)
++set_tests_properties(safe-to-destroy.sh PROPERTIES TIMEOUT 7200)
+
+ # unittest_osdmap
+ add_executable(unittest_osdmap
+diff -uNr ceph-15.2.4/src/test/osd/TestOSDScrub.cc ceph-15.2.4-fix_tests/src/test/osd/TestOSDScrub.cc
+--- ceph-15.2.4/src/test/osd/TestOSDScrub.cc 2020-07-01 01:10:51.000000000 +0930
++++ ceph-15.2.4-fix_tests/src/test/osd/TestOSDScrub.cc 2020-11-08 17:37:15.793767463 +1030
+@@ -70,7 +70,7 @@
+ g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "0");
+ g_ceph_context->_conf.set_val("osd_scrub_end_hour", "24");
+ g_ceph_context->_conf.apply_changes(nullptr);
+- tm tm;
++ tm tm = {0};
+ tm.tm_isdst = -1;
+ strptime("2015-01-16 12:05:13", "%Y-%m-%d %H:%M:%S", &tm);
+ utime_t now = utime_t(mktime(&tm), 0);
diff --git a/skip/ceph/42-no-virtualenvs.patch b/skip/ceph/42-no-virtualenvs.patch
new file mode 100644
index 0000000..541b338
--- /dev/null
+++ b/skip/ceph/42-no-virtualenvs.patch
@@ -0,0 +1,71 @@
+based on a gentoo patch
+use the system node instead of one installed by nodeenv, which is only available for x86 with musl
+
+--- a/cmake/modules/AddCephTest.cmake
++++ b/cmake/modules/AddCephTest.cmake
+@@ -68,14 +68,6 @@
+ endif()
+ string(REPLACE ";" "," tox_envs "${tox_envs}")
+ find_package(Python3 QUIET REQUIRED)
+- add_custom_command(
+- OUTPUT ${venv_path}/bin/activate
+- COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python="${Python3_EXECUTABLE}" ${venv_path}
+- WORKING_DIRECTORY ${tox_path}
+- COMMENT "preparing venv for ${name}")
+- add_custom_target(${name}-venv
+- DEPENDS ${venv_path}/bin/activate)
+- add_dependencies(tests ${name}-venv)
+ add_test(
+ NAME ${test_name}
+ COMMAND ${CMAKE_SOURCE_DIR}/src/script/run_tox.sh
+--- a/src/ceph-volume/CMakeLists.txt
++++ b/src/ceph-volume/CMakeLists.txt
+@@ -8,22 +8,6 @@
+ add_subdirectory(plugin/zfs)
+ endif()
+
+-# Required for running ceph-volume inventory in a vstart environment
+-set(CEPH_VOLUME_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/ceph-volume-virtualenv)
+-
+-add_custom_command(
+- OUTPUT ${CEPH_VOLUME_VIRTUALENV}/bin/python
+- COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${Python3_EXECUTABLE} ${CEPH_VOLUME_VIRTUALENV}
+- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/ceph-volume
+- COMMENT "ceph-volume venv is being created")
+-
+-add_custom_command(
+- OUTPUT ${CEPH_VOLUME_VIRTUALENV}/bin/ceph-volume
+- DEPENDS ${CEPH_VOLUME_VIRTUALENV}/bin/python
+- COMMAND . ${CEPH_VOLUME_VIRTUALENV}/bin/activate && ${CEPH_VOLUME_VIRTUALENV}/bin/python setup.py develop && deactivate
+- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/ceph-volume
+- COMMENT "${CMAKE_SOURCE_DIR}/src/ceph-volume")
+-
+ add_custom_target(ceph-volume-venv-setup
+ DEPENDS ${CEPH_VOLUME_VIRTUALENV}/bin/ceph-volume)
+
+--- a/src/pybind/mgr/dashboard/CMakeLists.txt
++++ b/src/pybind/mgr/dashboard/CMakeLists.txt
+@@ -5,9 +5,6 @@
+ set(multi_kw COMMAND DEPENDS)
+ cmake_parse_arguments(NC "${options}" "${single_kw}" "${multi_kw}" ${ARGN})
+ string(REPLACE ";" " " command "${NC_COMMAND}")
+- if(NC_NODEENV)
+- string(REGEX REPLACE "^(.*(npm|npx) .*)$" ". ${mgr-dashboard-nodeenv-dir}/bin/activate && \\1 && deactivate" command ${command})
+- endif()
+ string(REPLACE " " ";" command "${command}")
+ add_custom_command(
+ OUTPUT "${NC_OUTPUT}"
+@@ -51,11 +48,8 @@
+ set(node_mirror_opt "--mirror=$ENV{NODE_MIRROR}")
+ endif()
+ add_custom_command(
+- OUTPUT "${mgr-dashboard-nodeenv-dir}/bin/npm"
+- COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${MGR_PYTHON_EXECUTABLE} ${mgr-dashboard-nodeenv-dir}
+- COMMAND ${mgr-dashboard-nodeenv-dir}/bin/pip install nodeenv
+- COMMAND ${mgr-dashboard-nodeenv-dir}/bin/nodeenv --verbose ${node_mirror_opt} -p --node=12.18.2
+- COMMAND mkdir ${mgr-dashboard-nodeenv-dir}/.npm
++ OUTPUT "/usr/bin/npm"
++ COMMAND /usr/bin/nodeenv -p --node=system
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "dashboard nodeenv is being installed"
+ )
diff --git a/skip/ceph/43-LogClock.h.patch b/skip/ceph/43-LogClock.h.patch
new file mode 100644
index 0000000..8cd02e5
--- /dev/null
+++ b/skip/ceph/43-LogClock.h.patch
@@ -0,0 +1,18 @@
+reported as issue
+https://tracker.ceph.com/issues/50133
+
+needed for 32bit platforms
+
+--- aa/src/log/LogClock.h
++++ bb/src/log/LogClock.h
+@@ -12,10 +12,6 @@
+ #include "include/ceph_assert.h"
+ #include "common/ceph_time.h"
+
+-#ifndef suseconds_t
+-typedef long suseconds_t;
+-#endif
+-
+ namespace ceph {
+ namespace logging {
+ namespace _logclock {
diff --git a/skip/ceph/44-aarch64-erasure.patch b/skip/ceph/44-aarch64-erasure.patch
new file mode 100644
index 0000000..421decc
--- /dev/null
+++ b/skip/ceph/44-aarch64-erasure.patch
@@ -0,0 +1,129 @@
+merged as:
+https://github.com/ceph/isa-l/commit/bee5180a1517f8b5e70b02fcd66790c623536c5d
+
+--- a/src/isa-l/erasure_code/aarch64/gf_2vect_mad_neon.S
++++ b/src/isa-l/erasure_code/aarch64/gf_2vect_mad_neon.S
+@@ -360,7 +360,8 @@
+ sub x_dest1, x_dest1, x_tmp
+ sub x_dest2, x_dest2, x_tmp
+
+- ldr x_const, =const_tbl
++ adrp x_const, const_tbl
++ add x_const, x_const, :lo12:const_tbl
+ sub x_const, x_const, x_tmp
+ ldr q_tmp, [x_const, #16]
+
+@@ -394,7 +395,7 @@
+ mov w_ret, #1
+ ret
+
+-.section .data
++.section .rodata
+ .balign 8
+ const_tbl:
+ .dword 0x0000000000000000, 0x0000000000000000
+--- a/src/isa-l/erasure_code/aarch64/gf_3vect_mad_neon.S
++++ b/src/isa-l/erasure_code/aarch64/gf_3vect_mad_neon.S
+@@ -332,7 +332,8 @@
+ sub x_dest2, x_dest2, x_tmp
+ sub x_dest3, x_dest3, x_tmp
+
+- ldr x_const, =const_tbl
++ adrp x_const, const_tbl
++ add x_const, x_const, :lo12:const_tbl
+ sub x_const, x_const, x_tmp
+ ldr q_tmp, [x_const, #16]
+
+@@ -374,7 +375,7 @@
+ mov w_ret, #1
+ ret
+
+-.section .data
++.section .rodata
+ .balign 8
+ const_tbl:
+ .dword 0x0000000000000000, 0x0000000000000000
+--- a/src/isa-l/erasure_code/aarch64/gf_4vect_mad_neon.S
++++ b/src/isa-l/erasure_code/aarch64/gf_4vect_mad_neon.S
+@@ -397,7 +397,8 @@
+ sub x_dest3, x_dest3, x_tmp
+ sub x_dest4, x_dest4, x_tmp
+
+- ldr x_const, =const_tbl
++ adrp x_const, const_tbl
++ add x_const, x_const, :lo12:const_tbl
+ sub x_const, x_const, x_tmp
+ ldr q_tmp, [x_const, #16]
+
+@@ -448,7 +449,7 @@
+ mov w_ret, #1
+ ret
+
+-.section .data
++.section .rodata
+ .balign 8
+ const_tbl:
+ .dword 0x0000000000000000, 0x0000000000000000
+--- a/src/isa-l/erasure_code/aarch64/gf_5vect_mad_neon.S
++++ b/src/isa-l/erasure_code/aarch64/gf_5vect_mad_neon.S
+@@ -463,7 +463,8 @@
+ sub x_dest4, x_dest4, x_tmp
+ sub x_dest5, x_dest5, x_tmp
+
+- ldr x_const, =const_tbl
++ adrp x_const, const_tbl
++ add x_const, x_const, :lo12:const_tbl
+ sub x_const, x_const, x_tmp
+ ldr q_tmp, [x_const, #16]
+
+@@ -527,7 +528,7 @@
+ mov w_ret, #1
+ ret
+
+-.section .data
++.section .rodata
+ .balign 8
+ const_tbl:
+ .dword 0x0000000000000000, 0x0000000000000000
+--- a/src/isa-l/erasure_code/aarch64/gf_6vect_mad_neon.S
++++ b/src/isa-l/erasure_code/aarch64/gf_6vect_mad_neon.S
+@@ -526,7 +526,8 @@
+ sub x_dest5, x_dest5, x_tmp
+ sub x_dest6, x_dest6, x_tmp
+
+- ldr x_const, =const_tbl
++ adrp x_const, const_tbl
++ add x_const, x_const, :lo12:const_tbl
+ sub x_const, x_const, x_tmp
+ ldr q_tmp, [x_const, #16]
+
+@@ -602,7 +603,7 @@
+ mov w_ret, #1
+ ret
+
+-.section .data
++.section .rodata
+ .balign 8
+ const_tbl:
+ .dword 0x0000000000000000, 0x0000000000000000
+--- a/src/isa-l/erasure_code/aarch64/gf_vect_mad_neon.S
++++ b/src/isa-l/erasure_code/aarch64/gf_vect_mad_neon.S
+@@ -281,7 +281,8 @@
+ mov x_src, x_src_end
+ sub x_dest1, x_dest1, x_tmp
+
+- ldr x_const, =const_tbl
++ adrp x_const, const_tbl
++ add x_const, x_const, :lo12:const_tbl
+ sub x_const, x_const, x_tmp
+ ldr q_tmp, [x_const, #16]
+
+@@ -307,7 +308,7 @@
+ mov w_ret, #1
+ ret
+
+-.section .data
++.section .rodata
+ .balign 8
+ const_tbl:
+ .dword 0x0000000000000000, 0x0000000000000000
diff --git a/skip/ceph/44-cmake-buildtype.patch b/skip/ceph/44-cmake-buildtype.patch
new file mode 100644
index 0000000..7112ce1
--- /dev/null
+++ b/skip/ceph/44-cmake-buildtype.patch
@@ -0,0 +1,38 @@
+updated: https://github.com/ceph/ceph/commit/6e4481316884f08daad624c1d997378daedf410e
+
+commit a7e3ece459111d157a20d05de3a92cf4dab6bde6
+Author: Kefu Chai <kchai@redhat.com>
+Date: Thu Jul 1 15:24:50 2021 +0800
+
+ cmake: set CMAKE_BUILD_TYPE only if .git exists
+
+ distros intend to fine tune the CFLAGS and CXXFLAGS by themselves, see
+
+ - https://git.alpinelinux.org/abuild/tree/abuild.conf
+ - https://wiki.archlinux.org/title/CMake_package_guidelines#CMake_undesired_behaviors
+ - https://github.com/Debian/debhelper/blob/5d1bb29841043d8e47ebbdd043e6cd086cad508e/lib/Debian/Debhelper/Buildsystem/cmake.pm#L16
+
+ so instead of setting a CMAKE_BUILD_TYPE when building from a
+ dist tarball, let's just leave it empty.
+
+ Signed-off-by: Kefu Chai <kchai@redhat.com>
+
+diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
+index 760a2ceb0c..6f35e87f90 100644
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -156,12 +156,8 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL Clang)
+ endif()
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
+
+-if(NOT CMAKE_BUILD_TYPE)
+- if(EXISTS "${CMAKE_SOURCE_DIR}/.git")
+- set(default_build_type "Debug")
+- else()
+- set(default_build_type "RelWithDebInfo")
+- endif()
++if(NOT DEFINED CMAKE_BUILD_TYPE AND EXISTS "${CMAKE_SOURCE_DIR}/.git")
++ set(default_build_type "Debug")
+ set(CMAKE_BUILD_TYPE "${default_build_type}" CACHE
+ STRING "Default BUILD_TYPE is Debug, other options are: RelWithDebInfo, Release, and MinSizeRel." FORCE)
+ endif()
diff --git a/skip/ceph/44-missing-include.patch b/skip/ceph/44-missing-include.patch
new file mode 100644
index 0000000..f944255
--- /dev/null
+++ b/skip/ceph/44-missing-include.patch
@@ -0,0 +1,16 @@
+submitted as:
+https://github.com/ceph/ceph/pull/41470
+
+diff --git a/src/rgw/rgw_string.h b/src/rgw/rgw_string.h
+index 257daa9c1..90e64f98a 100644
+--- a/src/rgw/rgw_string.h
++++ b/src/rgw/rgw_string.h
+@@ -8,6 +8,8 @@
+ #include <stdlib.h>
+ #include <limits.h>
+ #include <string_view>
++#include <string>
++#include <stdexcept>
+
+ #include <boost/container/small_vector.hpp>
+
diff --git a/skip/ceph/44-staticcast.patch b/skip/ceph/44-staticcast.patch
new file mode 100644
index 0000000..ebe8bbf
--- /dev/null
+++ b/skip/ceph/44-staticcast.patch
@@ -0,0 +1,13 @@
+submitted as https://github.com/ceph/ceph/pull/40582
+
+--- a/src/common/buffer.cc
++++ b/src/common/buffer.cc
+@@ -2268,7 +2268,7 @@
+
+ void ceph::buffer::list::page_aligned_appender::_refill(size_t len) {
+ const size_t alloc = \
+- std::max((size_t)min_alloc, (len + CEPH_PAGE_SIZE - 1) & CEPH_PAGE_MASK);
++ std::max(static_cast<size_t>(min_alloc), static_cast<size_t>((len + CEPH_PAGE_SIZE - 1) & CEPH_PAGE_MASK));
+ auto new_back = \
+ ptr_node::create(buffer::create_page_aligned(alloc));
+ new_back->set_length(0); // unused, so far.
diff --git a/skip/ceph/ceph-user.pre-install b/skip/ceph/ceph-user.pre-install
new file mode 100644
index 0000000..bae4f09
--- /dev/null
+++ b/skip/ceph/ceph-user.pre-install
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+addgroup ceph -g 167 -S 2>/dev/null
+adduser ceph -u 167 -S -G ceph -s /sbin/nologin -h /var/lib/ceph -g "Ceph Daemons" 2> /dev/null
+exit 0
diff --git a/skip/ceph/ceph.confd b/skip/ceph/ceph.confd
new file mode 100644
index 0000000..32737b1
--- /dev/null
+++ b/skip/ceph/ceph.confd
@@ -0,0 +1,17 @@
+# Original source: https://gitweb.gentoo.org/repo/gentoo.git/tree/sys-cluster/ceph/files/ceph.confd-r5
+
+# Example
+
+# default ceph conf file
+#ceph_conf="/etc/ceph/ceph.conf"
+
+# Set RADOSGW_WANT_NAME_PARAM=y in order to make the init script add
+# a --name=client.${RC_SVCNAME} parameter to command_args for radosgw.*
+# service instances. This will make the service use a key by the name
+# of client.${RC_SVCNAME} instead of the default client.admin key.
+# A setting like this in the ceph config file can be used to customize
+# the rgw_data and keyring paths used by radosgw instances:
+# [client]
+# rgw_data = /var/lib/ceph/radosgw/$cluster-$id
+# keyring = /var/lib/ceph/radosgw/$cluster-$id/keyring
+RADOSGW_WANT_NAME_PARAM=n
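
A minimal sketch of what the RADOSGW_WANT_NAME_PARAM switch described above does
(the service name ceph-radosgw.gw1 is invented for the example; the parameter
expansions mirror the ceph.initd added below):

    #!/bin/sh
    RC_SVCNAME="ceph-radosgw.gw1"
    RADOSGW_WANT_NAME_PARAM=y

    daemon_type="${RC_SVCNAME#ceph-}"; daemon_type="${daemon_type%%.*}"  # radosgw
    daemon_id="${RC_SVCNAME#ceph-*.}"                                    # gw1
    command_args="-i ${daemon_id}"
    if [ "${daemon_type}" = radosgw ] && [ "${RADOSGW_WANT_NAME_PARAM}" = y ]; then
        command_args="${command_args} --name client.${daemon_id}"
    fi
    echo "${command_args}"   # prints: -i gw1 --name client.gw1
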
diff --git a/skip/ceph/ceph.initd b/skip/ceph/ceph.initd
new file mode 100644
index 0000000..c522efa
--- /dev/null
+++ b/skip/ceph/ceph.initd
@@ -0,0 +1,118 @@
+#!/sbin/openrc-run
+
+# Original source: https://gitweb.gentoo.org/repo/gentoo.git/tree/sys-cluster/ceph/files/ceph.initd-r12
+
+# make sure /etc/conf.d/ceph gets loaded for ceph-mon etc
+_CONFD_FILE="${RC_SERVICE%/*}/../conf.d/${RC_SVCNAME%-*}"
+if [ -r "${_CONFD_FILE}" ]; then
+ . "${_CONFD_FILE}" || exit 1
+fi
+
+: "${ceph_conf:=/etc/ceph/ceph.conf}"
+daemon_type="${RC_SVCNAME#ceph-}"
+daemon_type="${daemon_type%%.*}"
+daemon_id="${RC_SVCNAME#ceph-*.}"
+daemon_id="${daemon_id:-0}"
+: "${rundir:=/run/ceph}"
+: "${user:=ceph}"
+: "${group:=ceph}"
+: "${rc_ulimit:=-n 1048576 -u 1048576}"
+
+pidfile="${rundir}/supervisor-${daemon_type}.${daemon_id}.pid"
+daemon_pidfile="${rundir}/${daemon_type}.${daemon_id}.pid"
+
+command="/usr/bin/${RC_SVCNAME%%.*}"
+command_args="-i ${daemon_id} --pid-file ${daemon_pidfile} -c ${ceph_conf}"
+extra_commands="${extra_commands} reload"
+command_args_foreground="--foreground"
+
+retry="${CEPH_TERMTIMEOUT:-TERM/120/KILL/5}"
+start_stop_daemon_args="--user ${user} --group ${group}"
+supervise_daemon_args="--user ${user} --group ${group}"
+
+: "${supervisor:=supervise-daemon}"
+: "${stdout:=/var/log/ceph/ceph}"
+: "${stderr:=/var/log/ceph/ceph}"
+: "${respawn_delay:=10}"
+: "${respawn_max:=5}"
+: "${respawn_period:=1800}"
+
+: "${osd_respawn_delay:=15}"
+: "{osd_respawn_max:=10}"
+
+: "{radosgw_respawn_max:=5}"
+: "${radosgw_respawn_period:=30}"
+
+depend() {
+ use dns logger
+ after net ntpd ntp-client chronyd
+ before netmount
+}
+
+is_type_valid() {
+ case ${daemon_type} in
+ mon|mds|osd|mgr|radosgw) return 0;;
+ *) return 1;;
+ esac
+}
+
+start_pre() {
+ local log_dir
+ export CEPH_CONF="${ceph_conf}"
+
+ checkpath -d -q -o "${user}:${group}" "${rundir}"
+
+ if ! is_type_valid ;then
+        eerror "Please give a valid Ceph server type: mds, mon, osd, mgr, radosgw"
+ return 1
+
+ elif pgrep -f "[c]eph-${daemon_type} -i ${daemon_id} "; then
+ eerror "${daemon_type}.${daemon_id} is still running, refusing to start"
+ return 1
+ fi
+
+ if [ -n "${bluestore_osd_fsid}" ]; then
+ einfo "Mounting Bluestore"
+ ceph-volume lvm activate "${daemon_id}" "${bluestore_osd_fsid}" --no-systemd
+ fi
+
+ if [ "${daemon_type}" = radosgw ] && [ "${RADOSGW_WANT_NAME_PARAM}" = y ]; then
+ command_args="${command_args} --name client.${daemon_id}"
+ fi
+
+ local arg_name arg_val repl_arg_name
+ for arg_name in stdout stderr respawn_delay respawn_max respawn_period; do
+ eval arg_val="\${${daemon_type}_${arg_name}}"
+
+ if [ -z "${arg_val}" ]; then
+ eval arg_val="\${${arg_name}}"
+ else
+ eval "${arg_name}=\"${arg_val}\""
+ fi
+
+ if [ "${arg_name}" = "stderr" ] || [ "${arg_name}" = "stdout" ]; then
+ local log_file log_postfix
+ log_postfix=".${daemon_id}-${arg_name}.log"
+ log_file="${arg_val}"
+
+ if [ "${log_file}" != /dev/null ]; then
+ log_file="${log_file}${log_postfix}"
+
+ log_dir="$(dirname "${log_file}")"
+ checkpath -m 0755 -o "${user}:${group}" -d "${log_dir}"
+ fi
+
+ repl_arg_name="$(printf -- "%s\n" "${arg_name}" | tr _ -)"
+ supervise_daemon_args="${supervise_daemon_args} --${repl_arg_name}=${log_file}"
+ fi
+ done
+}
+
+reload() {
+ ebegin "Reloading Ceph ${daemon_type}.${daemon_id}"
+    start-stop-daemon --signal 1 --pidfile "${daemon_pidfile}" ${start_stop_daemon_args}
+ eend ${?}
+}
+
+# vim:ft=gentoo-init-d:ts=4:sts=4:sw=4:noet:
+
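
The loop in start_pre() above resolves per-daemon settings by first looking for a
${daemon_type}_<name> variable and only then falling back to the global default.
A minimal, standalone sketch of that lookup (the values mirror the defaults
declared in the script; the single-entry loop is just for illustration):

    #!/bin/sh
    daemon_type=osd
    respawn_delay=10       # global default
    osd_respawn_delay=15   # per-daemon override

    for arg_name in respawn_delay; do
        eval arg_val="\${${daemon_type}_${arg_name}}"    # reads $osd_respawn_delay
        if [ -n "${arg_val}" ]; then
            eval "${arg_name}=\"${arg_val}\""            # override wins
        fi
    done
    echo "${respawn_delay}"   # prints 15
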
diff --git a/skip/ceph/ceph.xibuild b/skip/ceph/ceph.xibuild
new file mode 100644
index 0000000..38f3082
--- /dev/null
+++ b/skip/ceph/ceph.xibuild
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+NAME="ceph"
+DESC="Ceph is a distributed object store and file system"
+
+MAKEDEPS="acl argp-standalone bc boost btrfs-progs bzip2 cmake cryptsetup cunit curl cython diffutils doxygen eudev expat fcgi flex fmt fuse fuse git graphviz grep gperf jq keyutils leveldb libaio libcap-ng libedit librdkafka libnl libtirpc libtool libxml2 linux-headers lua lvm2 lz4 nodejs nss oath-toolkit libldap openssl procps-ng python python-prettytable python-sphinx rabbitmq-c readline ninja snappy sqlite3 userspace-rcu xfsprogs xmlstarlet yarn yasm cryptsetup e2fsprogs parted util-linux xfsprogs fuse snappy lz4 lvm2 xmlstarlet python-coverage python-flake8 python-nodeenv python-nose python-pytest python-tox npm"
+
+PKG_VER=16.2.9
+SOURCE="https://download.ceph.com/tarballs/ceph_$PKG_VER.orig.tar.gz"
+
+ADDITIONAL="
+10-musl-fixes.patch
+11-dump_time_header_impl.patch
+11-parse_rfc1123_alt.patch
+11-s3_expiration_header.patch
+12-package.json-resolutions.patch
+20-pci.patch
+32-PurgeQueue.cc-cast.patch
+32-upstream32bit.patch
+32-upstream32bitcleanup.patch
+35-fix_ErasureCodeShec.patch
+37-fix_tests.patch
+42-no-virtualenvs.patch
+43-LogClock.h.patch
+44-aarch64-erasure.patch
+44-cmake-buildtype.patch
+44-missing-include.patch
+44-staticcast.patch
+ceph.confd
+ceph.initd
+"
+
+_py3_sitelib() {
+ python -c "import site; print(site.getsitepackages()[0])"
+}
+
+prepare() {
+ apply_patches
+
+ # delete bundled boost as >300mb and using system boost
+ rm -rf src/boost
+}
+
+build() {
+ export CEPH_BUILD_VIRTUALENV="$BUILD_ROOT"
+
+ # builders keep failing when -jN == nproc
+ export MAKEFLAGS="$MAKEFLAGS -j$((JOBS<12 ? JOBS : 12))"
+
+ # use alternate registry as original can timeout for arm32bit
+ export NPM_REGISTRY=https://registry.npmjs.org
+
+ cmake -B build -G Ninja \
+ -DCMAKE_BUILD_TYPE=MinSizeRel \
+ -DLUA_LIBRARIES=/usr/lib/liblua.so \
+ -DALLOCATOR=libc \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DCMAKE_INSTALL_LIBDIR=/usr/lib \
+ -DCMAKE_INSTALL_LOCALSTATEDIR=/var \
+ -DCMAKE_INSTALL_SYSCONFDIR=/etc \
+ -DWITH_REENTRANT_STRSIGNAL=ON \
+ -DWITH_THREAD_SAFE_RES_QUERY=ON \
+ -DWITH_MANPAGE=ON \
+ -DWITH_SYSTEM_BOOST=ON \
+ -DWITH_SYSTEM_NPM=ON \
+ -DWITH_LTTNG=OFF \
+ -DWITH_RDMA=OFF \
+ -DWITH_SYSTEMD=OFF \
+ -DWITH_SPDK=OFF \
+ -DWITH_BABELTRACE=OFF \
+ -DWITH_RADOSGW_AMQP_ENDPOINT=OFF \
+ -DWITH_TESTS=OFF
+ mkdir -p build/src/pybind/mgr/dashboard/cypress
+ cmake --build build
+
+}
+
+package() {
+ # free up some space before install
+ rm -rf build/src/pybind/mgr/dashboard/cypress
+ rm -rf src/pybind/mgr/dashboard/frontend/node_modules
+
+ DESTDIR="$PKG_DEST" cmake --install build
+ # yarn creates an empty usr/local/bin
+    rm -rf "${PKG_DEST:?}"/usr/local
+
+ # fix /usr permission
+ chmod 755 "$PKG_DEST/usr"
+
+ # remove dashboard angular app source
+ rm -rf "$PKG_DEST"/usr/share/ceph/mgr/dashboard/frontend/src
+
+ # remove the upstream init file and put in openrc ones
+ rm -f "$PKG_DEST"/etc/init.d/ceph
+ install -D -m 755 "$BUILD_ROOT"/"ceph".initd "$PKG_DEST"/etc/init.d/ceph
+ install -D -m 644 "$BUILD_ROOT"/"ceph".confd "$PKG_DEST"/etc/conf.d/ceph
+
+ # move mount.* binaries to /sbin
+ mkdir -p "$PKG_DEST"/sbin
+ mv "$PKG_DEST"/usr/sbin/mount.* "$PKG_DEST"/sbin
+
+ install -m 644 -D src/etc-rbdmap "$PKG_DEST"/etc/ceph/rbdmap
+ install -m 644 -D src/logrotate.conf "$PKG_DEST"/etc/logrotate.d/ceph
+ install -m 644 -D etc/sysctl/90-ceph-osd.conf "$PKG_DEST"/etc/sysctl.d/90-ceph-osd.conf
+
+ # udev rules
+ install -m 644 -D udev/50-rbd.rules "$PKG_DEST"/etc/udev/rules.d/50-rbd.rules
+ # sudoers.d
+ install -m 600 -D sudoers.d/ceph-smartctl "$PKG_DEST"/etc/sudoers.d/ceph-smartctl
+
+ # delete systemd related stuff
+ rm "$PKG_DEST"/usr/sbin/ceph-volume-systemd
+
+ # move docs to docs
+ mkdir -p "$PKG_DEST"/usr/share/doc/ceph/dashboard
+ mv "$PKG_DEST"/usr/share/ceph/mgr/dashboard/*.rst "$PKG_DEST"/usr/share/doc/ceph/dashboard/
+ mv "$PKG_DEST"/usr/share/ceph/mgr/cephadm/HACKING.rst "$PKG_DEST"/usr/share/doc/ceph/cephadm-HACKING.rst
+}
+
+# TODO split this into other packages
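
The build() step above caps parallel make jobs at 12 because, per the comment in
the script, builders fail when -jN equals nproc. A small sketch of that arithmetic,
with JOBS standing in for whatever job count the build environment exports
(16 is an arbitrary example value):

    #!/bin/sh
    JOBS=16
    MAKEFLAGS="${MAKEFLAGS} -j$((JOBS < 12 ? JOBS : 12))"
    echo "${MAKEFLAGS}"   # -> " -j12"; with JOBS=8 it would be " -j8"
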
diff --git a/skip/dotnet-sdk/dotnet-sdk.xibuild b/skip/dotnet-sdk/dotnet-sdk.xibuild
new file mode 100644
index 0000000..e4ed99a
--- /dev/null
+++ b/skip/dotnet-sdk/dotnet-sdk.xibuild
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+NAME="dotnet-sdk"
+DESC="Core functionality needed to create .NET Core projects"
+
+MAKEDEPS="make "
+
+PKG_VER=6.0.301
+SOURCE="https://github.com/dotnet/sdk/archive/refs/tags/v$PKG_VER.tar.gz"
+
+build () {
+ ./build.sh
+}
+
+package () {
+    make PREFIX=/usr DESTDIR="$PKG_DEST" install
+}
diff --git a/skip/zynaddsubfx/cmake-build-type-none.patch b/skip/zynaddsubfx/cmake-build-type-none.patch
new file mode 100644
index 0000000..74e64a7
--- /dev/null
+++ b/skip/zynaddsubfx/cmake-build-type-none.patch
@@ -0,0 +1,47 @@
+Index: src/CMakeLists.txt
+===================================================================
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -185,7 +185,6 @@ option (BuildForDebug "Include gdb debug
+ option (IncludeWhatYouUse "Check for useless includes" OFF)
+ mark_as_advanced(IncludeWhatYouUse)
+
+-set(CMAKE_BUILD_TYPE "Release")
+
+
+ set (BuildOptions_x86_64AMD
+@@ -321,34 +320,6 @@ if(NOT AVOID_ASM)
+ add_definitions(-DASM_F2I_YES)
+ endif()
+
+-if (BuildForDebug)
+- set (CMAKE_BUILD_TYPE "Debug")
+- set (CMAKE_CXX_FLAGS_DEBUG ${BuildOptionsDebug})
+- message (STATUS "Building for ${CMAKE_BUILD_TYPE}, flags: ${CMAKE_CXX_FLAGS_DEBUG}")
+-else (BuildForDebug)
+- set (CMAKE_BUILD_TYPE "Release")
+-
+- set (CMAKE_CXX_FLAGS_RELEASE ${BuildOptionsBasic})
+-
+- if (BuildForAMD_X86_64)
+- set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${BuildOptions_x86_64AMD}")
+- endif (BuildForAMD_X86_64)
+-
+- if (BuildForCore2_X86_64)
+- set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${BuildOptions_X86_64Core2}")
+- endif (BuildForCore2_X86_64)
+-
+- if (SUPPORT_SSE)
+- set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${BuildOptions_SSE}")
+- endif (SUPPORT_SSE)
+-
+- if (SUPPORT_NEON AND NOT NoNeonPlease)
+- set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${BuildOptions_NEON}")
+- endif (SUPPORT_NEON AND NOT NoNeonPlease)
+-
+- message (STATUS "Building for ${CMAKE_BUILD_TYPE}, flags: ${CMAKE_CXX_FLAGS_RELEASE}")
+-endif (BuildForDebug)
+-
+ if(NOT (${CMAKE_SYSTEM_NAME} STREQUAL "Windows"))
+ add_definitions(-fPIC)
+ endif()
diff --git a/skip/zynaddsubfx/fix-bogus-strstr.patch b/skip/zynaddsubfx/fix-bogus-strstr.patch
new file mode 100644
index 0000000..8588045
--- /dev/null
+++ b/skip/zynaddsubfx/fix-bogus-strstr.patch
@@ -0,0 +1,16 @@
+Source: @pullmoll
+Upstream: no
+Reason: This piece of code is ill-conceived and cannot work. Remove it.
+
+--- a/rtosc/src/dispatch.c 2019-03-10 17:16:45.000000000 +0100
++++ b/rtosc/src/dispatch.c 2020-03-29 09:02:42.916119722 +0200
+@@ -289,8 +289,7 @@
+ return false;
+ } else if(type == 4) {
+ //extract substring
+- const char *sub=NULL;
+- return strstr(a,sub);
++ return false;
+ } else if(type == RTOSC_MATCH_OPTIONS || type == 6) {
+ return false;
+ } else if(type == RTOSC_MATCH_ENUMERATED) {
diff --git a/skip/zynaddsubfx/fix-memset.patch b/skip/zynaddsubfx/fix-memset.patch
new file mode 100644
index 0000000..1439804
--- /dev/null
+++ b/skip/zynaddsubfx/fix-memset.patch
@@ -0,0 +1,11 @@
+--- a/src/globals.h 2015-06-28 00:25:59.000000000 +0200
++++ b/src/globals.h 2015-10-03 15:34:18.914712672 +0200
+@@ -25,6 +25,8 @@
+ #ifndef GLOBALS_H
+ #define GLOBALS_H
+
++#include <string.h> /* memset(3) */
++
+ #if defined(__clang__)
+ #define REALTIME __attribute__((annotate("realtime")))
+ #define NONREALTIME __attribute__((annotate("nonrealtime")))
diff --git a/skip/zynaddsubfx/zynaddsubfx.xibuild b/skip/zynaddsubfx/zynaddsubfx.xibuild
new file mode 100644
index 0000000..228806b
--- /dev/null
+++ b/skip/zynaddsubfx/zynaddsubfx.xibuild
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+NAME="zynaddsubfx"
+DESC="Opensource software synthesizer capable of making a countless number of instruments."
+
+MAKEDEPS="cmake jack fltk portaudio mxml sndio bash-completion liblo libxpm"
+DEPS="less "
+
+PKG_VER=3.0.5
+SOURCE="https://downloads.sourceforge.net/project/zynaddsubfx/zynaddsubfx/$PKG_VER/zynaddsubfx-$PKG_VER.tar.bz2"
+ADDITIONAL="
+cmake-build-type-none.patch
+fix-bogus-strstr.patch
+fix-memset.patch
+"
+prepare () {
+ apply_patches
+ sed -e '/-DASM_F2I_YES/d' -i src/CMakeLists.txt
+
+ sed -e 's/COMMAND.*lv2-ttl-generator/COMMAND lv2-ttl-generator/g' -i \
+ src/Plugin/AlienWah/CMakeLists.txt \
+ src/Plugin/Chorus/CMakeLists.txt \
+ src/Plugin/Distortion/CMakeLists.txt \
+ src/Plugin/DynamicFilter/CMakeLists.txt \
+ src/Plugin/Echo/CMakeLists.txt \
+ src/Plugin/Phaser/CMakeLists.txt \
+ src/Plugin/Reverb/CMakeLists.txt \
+ src/Plugin/ZynAddSubFX/CMakeLists.txt
+
+ sed -i -e 's;gcc ;${CC} ;' \
+ -e 's;.fltk-config --cflags.;& ${CFLAGS};' \
+ -e 's;.fltk-config --ldflags.;& ${LDFLAGS};' \
+ ExternalPrograms/Spliter/Makefile \
+ ExternalPrograms/Controller/Makefile
+}
+
+build () {
+ cmake -B build \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DCMAKE_INSTALL_LIBDIR=/usr/lib \
+ -DDefaultOutput=jack \
+ -DDefaultInput=jack \
+ -DGuiModule=fltk
+ cmake --build build &&
+ make -C ExternalPrograms/Spliter &&
+ make -C ExternalPrograms/Controller
+}
+
+package () {
+ DESTDIR="$PKG_DEST" cmake --install build
+}