From 53d11f8d2026701aa74a690cb7375546830d4c70 Mon Sep 17 00:00:00 2001 From: "Kyle J. McKay" Date: Sat, 15 Aug 2020 21:54:57 -0700 Subject: [PATCH] taskd/clone.sh: do not rely on SIGXFSZ error The actual git process that ends up dying as a result of a SIGXFSZ when it attempts to create a file that's too large will almost certainly not be the initial git process that's running the fetch. As a result, the actual error exit code will end up being something much less helpful. Attempt to detect this situation by looking for any files that are the same size as the file size limit (or larger) and trigger the "exceeds file size limit" failure that way as well. Signed-off-by: Kyle J. McKay --- taskd/clone.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/taskd/clone.sh b/taskd/clone.sh index dc6c74a..8e9c839 100755 --- a/taskd/clone.sh +++ b/taskd/clone.sh @@ -89,10 +89,20 @@ send_clone_failed() { trap "" EXIT # We must now close the .clonelog file that is open on stdout and stderr exec >/dev/null 2>&1 + # It would be nice if git propagated the SIGXFSZ error on up to the shell, + # perhaps it will at some point in the future. In any case, the only file + # that might be too big would end up in the objects subdirectory. + # Search for any files of size $cfg_max_file_size512 blocks (if set) or + # larger and trigger the too big failure that way as well. + toobig= + if [ "${cfg_max_file_size512:-0}" != "0" ]; then + toobig="$(find -H objects -type f -size +$(( $cfg_max_file_size512 - 1 )) -print 2>/dev/null | + head -n 1)" || : + fi failaddrs="$(config_get owner)" || : ccadm="${cfg_admincc:-0}" xfsz_err="" - if [ "${exit_err:-0}" = "${var_xfsz_err:-999}" ]; then + if [ -n "$toobig" ] || [ "${exit_err:-0}" = "${var_xfsz_err:-999}" ]; then ccadm=1 reposize="$(cd objects && du -sk . | LC_ALL=C awk '{print $1}')" || : if [ -n "$reposize" ]; then -- 2.11.4.GIT