# This file is part of Autoconf.			-*- Autoconf -*-
# M4 macros used in building test suites.
m4_define([_AT_COPYRIGHT_YEARS], [
Copyright (C) 2000-2012 Free Software Foundation, Inc.
])

# This file is part of Autoconf.  This program is free
# software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the Autoconf Configure Script Exception,
# version 3.0, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License
# and a copy of the Autoconf Configure Script Exception along with
# this program; see the files COPYINGv3 and COPYING.EXCEPTION
# respectively.  If not, see <http://www.gnu.org/licenses/>.


# _m4_divert(DIVERSION-NAME)
# --------------------------
# Convert a diversion name into its number.  Otherwise, return
# DIVERSION-NAME, which is supposed to be an actual diversion number.
# Of course it would be nicer to use m4_case here, instead of zillions
# of little macros, but it then takes twice as long to run `autoconf'!
#
# From M4sugar:
#    -1. KILL
# 10000. GROW
#
# From M4sh:
#     0. BINSH
#     1. HEADER-REVISION
#     2. HEADER-COMMENT
#     3. HEADER-COPYRIGHT
#     4. M4SH-SANITIZE
#     5. M4SH-INIT
#  1000. BODY
#
# Defined below:
#  - DEFAULTS
#    Overall initialization, value of $at_groups_all.
#  - PARSE_ARGS_BEGIN
#    Set up defaults required for option processing.
#  - PARSE_ARGS
#    Option processing.  After AT_INIT, user options can be entered here as
#    cases of a case statement.
#  - PARSE_ARGS_END
#    Finish up the option processing.
#
#  - HELP
#    Start printing the help message.
#  - HELP_MODES
#    Modes help text.  Additional modes can be appended as self-contained
#    cat'd here-docs as generated by AS_HELP_STRING.
#  - HELP_TUNING_BEGIN
#    Tuning help text.  This is for Autotest-provided text.
#  - HELP_TUNING
#    Additional tuning options' help text can be appended here as
#    self-contained cat'd here-docs as generated by AS_HELP_STRING.
#  - HELP_OTHER
#    User help can be appended to this as self-contained cat'd here-docs.
#  - HELP_END
#    Finish up the help texts.
#
#  - VERSION
#    Head of the handling of --version.
#  - VERSION_NOTICES
#    Copyright notices for --version.
#  - VERSION_END
#    Tail of the handling of --version.
#
#  - BANNERS
#    Output shell initialization for the associative array of banner text.
#  - TESTS_BEGIN
#    Like DEFAULTS but run after argument processing for purposes of
#    optimization.  Do anything else that needs to be done to prepare for
#    tests.  Sets up verbose and log file descriptors.  Sets and logs PATH.
#  - PREPARE_TESTS
#    Declares functions shared among the tests.  Perform any user
#    initialization to be shared among all tests.
#  - TESTS
#    The core of the test suite.
#
#  - TEST_SCRIPT
#    The collector for code for each test, the ``normal'' diversion, but
#    undiverted into other locations before final output.
#
#  - TEST_GROUPS
#    Contents of each test group.  The tests deliberately occur after the
#    end of the shell script, so that the shell need not spend time parsing
#    commands it will not execute.

m4_define([_m4_divert(DEFAULTS)], 100)
m4_define([_m4_divert(PARSE_ARGS_BEGIN)], 200)
m4_define([_m4_divert(PARSE_ARGS)], 201)
m4_define([_m4_divert(PARSE_ARGS_END)], 202)
m4_define([_m4_divert(HELP)], 300)
m4_define([_m4_divert(HELP_MODES)], 301)
m4_define([_m4_divert(HELP_TUNING_BEGIN)], 302)
m4_define([_m4_divert(HELP_TUNING)], 303)
m4_define([_m4_divert(HELP_OTHER)], 304)
m4_define([_m4_divert(HELP_END)], 305)
m4_define([_m4_divert(VERSION)], 350)
m4_define([_m4_divert(VERSION_NOTICES)], 351)
m4_define([_m4_divert(VERSION_END)], 352)
m4_define([_m4_divert(BANNERS)], 400)
m4_define([_m4_divert(TESTS_BEGIN)], 401)
m4_define([_m4_divert(PREPARE_TESTS)], 402)
m4_define([_m4_divert(TESTS)], 403)
m4_define([_m4_divert(TEST_SCRIPT)], 450)
m4_define([_m4_divert(TEST_GROUPS)], 500)


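# For illustration only: any chunk of generated shell text is routed by
# pushing one of the named diversions above.  A hypothetical fragment such as
#
#   m4_divert_push([HELP_MODES])dnl
#   ... shell code printing help for a new operation mode ...
#   m4_divert_pop([HELP_MODES])dnl
#
# is collected in diversion 301 and therefore ends up between the HELP (300)
# and HELP_TUNING_BEGIN (302) chunks of the emitted testsuite script.
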
# AT_LINE
# -------
# Return the current file sans directory, a colon, and the current
# line.  Be sure to return a _quoted_ file name, so if, for instance,
# the user is lunatic enough to have a file named `dnl' (and I, for
# one, love to be brainless and stubborn sometimes), then we return a
# quoted name.
#
# Gee, we can't use simply
#
#	m4_bpatsubst(__file__, [^.*/\(.*\)], [[\1]])
#
# since then, as `dnl' doesn't match the pattern, it is returned
# with one quotation level too few, so you lose!  And since GNU M4
# is one of the biggest pieces of junk in the whole universe wrt regexps,
# don't even think about using `?' or `\?'.  Bah, `*' will do.
# Pleeeeeeeease, Gary, provide us with dirname and ERE!
#
# M4 recompiles the regular expression for every m4_bpatsubst, but __file__
# rarely changes.  Be fast - only compute the dirname when necessary; for
# autoconf alone, this shaves off several seconds in building the testsuite.
m4_define([_AT_LINE_file])
m4_define([_AT_LINE_base])
m4_define([AT_LINE],
[m4_if(m4_defn([_AT_LINE_file]), __file__, [],
       [m4_do([m4_define([_AT_LINE_file], __file__)],
	      [m4_define([_AT_LINE_base],
			 m4_bregexp(/__file__, [/\([^/]*\)$], [[\1]]))])])dnl
m4_defn([_AT_LINE_base]):__line__])

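# For illustration only: if a hypothetical input file tests/foo.at expands
# AT_LINE at its 5th line, the macro caches the basename once and then yields
# the quoted string foo.at:5; _AT_LINE_ESCAPED below additionally escapes it
# so it can be embedded safely inside double-quoted generated shell code.
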
# _AT_LINE_ESCAPED
# ----------------
# Same as AT_LINE, but already escaped for the shell.
m4_define([_AT_LINE_ESCAPED], ["AS_ESCAPE(m4_dquote(AT_LINE))"])


# _AT_NORMALIZE_TEST_GROUP_NUMBER(SHELL-VAR)
# ------------------------------------------
# Normalize SHELL-VAR so that its value has the same number of digits as
# all the other test group numbers.
m4_define([_AT_NORMALIZE_TEST_GROUP_NUMBER],
[
  eval 'while :; do
    case $$1 in #(
    '"$at_format"'*) break;;
    esac
    $1=0$$1
  done'
])

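# For illustration only (hypothetical values): once the suite knows its last
# group number, $at_format holds one question mark per digit, e.g. '???' for
# a 250-group suite; normalizing a variable that holds 7 then prepends zeroes
# until the width matches, giving 007, so that ls lists the per-group
# directories in numerical order.
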
# _AT_DEFINE_INIT(NAME, [DEFINITION])
# -----------------------------------
# Define macro NAME to die if invoked prior to AT_INIT, and to DEFINITION
# after AT_INIT.
m4_define([_AT_DEFINE_INIT],
[m4_define($@)m4_pushdef([$1], [m4_fatal([$1: missing AT_INIT detected])])dnl
m4_append([_AT_DEFINE_INIT_LIST], [[$1]], [,])])

# _AT_DEFINE_SETUP(NAME, [DEFINITION])
# ------------------------------------
# Define macro NAME to die if invoked outside AT_SETUP/AT_CLEANUP, and
# to DEFINITION otherwise.
m4_define([_AT_DEFINE_SETUP],
[m4_define([$1], [m4_ifndef([AT_ingroup],
[m4_fatal([$1: missing AT_SETUP detected])])$2])])


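# For illustration only: a hypothetical later definition
#
#   _AT_DEFINE_INIT([AT_TESTED], [...real definition...])
#
# stores the real macro body but immediately shadows it with a guard, so that
# expanding the macro before AT_INIT aborts with "missing AT_INIT detected";
# AT_INIT then pops every guard recorded on _AT_DEFINE_INIT_LIST.  The
# _AT_DEFINE_SETUP variant instead checks at expansion time that AT_ingroup
# is defined, i.e. that we are between AT_SETUP and AT_CLEANUP.
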
# AT_INIT([TESTSUITE-NAME])
|
||
# -------------------------
|
||
# Begin test suite.
|
||
m4_define([AT_INIT],
|
||
[m4_pushdef([AT_INIT], [m4_fatal([$0: invoked multiple times])])]
|
||
[m4_pattern_forbid([^_?AT_])]
|
||
[m4_pattern_allow([^_ATEOF$])]
|
||
[m4_ifndef([AT_PACKAGE_BUGREPORT], [m4_fatal(
|
||
[$1: AT_PACKAGE_BUGREPORT is missing, consider writing package.m4])])]
|
||
[m4_define([AT_TESTSUITE_NAME],
|
||
m4_defn([AT_PACKAGE_STRING])[ test suite]m4_ifval([$1],
|
||
[m4_expand([: $1])]))]
|
||
[m4_define([AT_ordinal], 0)]
|
||
[m4_define([AT_banner_ordinal], 0)]
|
||
[m4_define([AT_help_all], [])]
|
||
[m4_map_args([_m4_popdef], _AT_DEFINE_INIT_LIST)]
|
||
[m4_wrap([_AT_FINISH])]
|
||
[AS_INIT[]]dnl
|
||
dnl We don't use m4sh's BODY diversion, but AS_INIT sticks a banner there.
|
||
dnl This trick removes that banner, since it adds nothing to autotest.
|
||
[m4_cleardivert([BODY])]dnl
|
||
[AS_ME_PREPARE[]]dnl
|
||
[m4_divert_push([DEFAULTS])]dnl
|
||
[AT_COPYRIGHT(m4_defn([_AT_COPYRIGHT_YEARS]), [
|
||
m4_copyright_condense])]
|
||
[AT_COPYRIGHT(
|
||
[This test suite is free software; the Free Software Foundation gives
|
||
unlimited permission to copy, distribute and modify it.], [m4_echo])]
|
||
[AS_PREPARE
|
||
|
||
SHELL=${CONFIG_SHELL-/bin/sh}
|
||
|
||
# How were we run?
|
||
at_cli_args="$[@]"
|
||
|
||
m4_divert_push([BANNERS])dnl
|
||
|
||
# Should we print banners? Yes if more than one test is run.
|
||
case $at_groups in #(
|
||
*$as_nl* )
|
||
at_print_banners=: ;; #(
|
||
* ) at_print_banners=false ;;
|
||
esac
|
||
# Text for banner N, set to a single space once printed.
|
||
m4_divert_pop([BANNERS])dnl back to DEFAULTS
|
||
m4_divert_push([PREPARE_TESTS])dnl
|
||
|
||
m4_text_box([Autotest shell functions.])
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_banner], [NUMBER],
|
||
[Output banner NUMBER, provided the testsuite is running multiple groups
|
||
and this particular banner has not yet been printed.])
|
||
at_fn_banner ()
|
||
{
|
||
$at_print_banners || return 0
|
||
eval at_banner_text=\$at_banner_text_$[1]
|
||
test "x$at_banner_text" = "x " && return 0
|
||
eval "at_banner_text_$[1]=\" \""
|
||
if test -z "$at_banner_text"; then
|
||
$at_first || echo
|
||
else
|
||
AS_ECHO(["$as_nl$at_banner_text$as_nl"])
|
||
fi
|
||
} # at_fn_banner
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_check_prepare_notrace], [REASON LINE],
|
||
[Perform AT_CHECK preparations for the command at LINE for an
|
||
untraceable command; REASON is the reason for disabling tracing.])
|
||
at_fn_check_prepare_notrace ()
|
||
{
|
||
$at_trace_echo "Not enabling shell tracing (command contains $[1])"
|
||
AS_ECHO(["$[2]"]) >"$at_check_line_file"
|
||
at_check_trace=: at_check_filter=:
|
||
: >"$at_stdout"; : >"$at_stderr"
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_check_prepare_trace], [LINE],
|
||
[Perform AT_CHECK preparations for the command at LINE for a traceable
|
||
command.])
|
||
at_fn_check_prepare_trace ()
|
||
{
|
||
AS_ECHO(["$[1]"]) >"$at_check_line_file"
|
||
at_check_trace=$at_traceon at_check_filter=$at_check_filter_trace
|
||
: >"$at_stdout"; : >"$at_stderr"
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_check_prepare_dynamic], [COMMAND LINE],
|
||
[Decide if COMMAND at LINE is traceable at runtime, and call the
|
||
appropriate preparation function.])
|
||
at_fn_check_prepare_dynamic ()
|
||
{
|
||
case $[1] in
|
||
*$as_nl*)
|
||
at_fn_check_prepare_notrace 'an embedded newline' "$[2]" ;;
|
||
*)
|
||
at_fn_check_prepare_trace "$[2]" ;;
|
||
esac
|
||
}
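
# For illustration only (hypothetical arguments): a check on a simple command
# is prepared as
#   at_fn_check_prepare_dynamic "grep foo input" "foo.at:7"
# and keeps shell tracing enabled, whereas a command containing an embedded
# newline (say, a multi-line here-document) takes the notrace path, since
# tracing such a command would garble the captured stderr.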
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_filter_trace], [],
|
||
[Remove the lines in the file "$at_stderr" generated by "set -x" and print
|
||
them to stderr.])
|
||
at_fn_filter_trace ()
|
||
{
|
||
mv "$at_stderr" "$at_stder1"
|
||
grep '^ *+' "$at_stder1" >&2
|
||
grep -v '^ *+' "$at_stder1" >"$at_stderr"
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_log_failure], [FILE-LIST],
|
||
[Copy the files in the list on stdout with a "> " prefix, and exit the shell
|
||
with a failure exit code.])
|
||
at_fn_log_failure ()
|
||
{
|
||
for file
|
||
do AS_ECHO(["$file:"]); sed 's/^/> /' "$file"; done
|
||
echo 1 > "$at_status_file"
|
||
exit 1
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_check_skip], [EXIT-CODE LINE],
|
||
[Check whether EXIT-CODE is a special exit code (77 or 99), and if so exit
|
||
the test group subshell with that same exit code. Use LINE in any report
|
||
about test failure.])
|
||
at_fn_check_skip ()
|
||
{
|
||
case $[1] in
|
||
99) echo 99 > "$at_status_file"; at_failed=:
|
||
AS_ECHO(["$[2]: hard failure"]); exit 99;;
|
||
77) echo 77 > "$at_status_file"; exit 77;;
|
||
esac
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_check_status], [EXPECTED EXIT-CODE LINE],
|
||
[Check whether EXIT-CODE is the EXPECTED exit code, and if so do nothing.
|
||
Otherwise, if it is 77 or 99, exit the test group subshell with that same
|
||
exit code; if it is anything else print an error message referring to LINE,
|
||
and fail the test.])
|
||
at_fn_check_status ()
|
||
{
|
||
dnl This order ensures that we don't `skip' if we are precisely checking
|
||
dnl $? = 77 or $? = 99.
|
||
case $[2] in
|
||
$[1] ) ;;
|
||
77) echo 77 > "$at_status_file"; exit 77;;
|
||
99) echo 99 > "$at_status_file"; at_failed=:
|
||
AS_ECHO(["$[3]: hard failure"]); exit 99;;
|
||
*) AS_ECHO(["$[3]: exit code was $[2], expected $[1]"])
|
||
at_failed=:;;
|
||
esac
|
||
}
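
# For illustration only (hypothetical location): a check that expects success
# is typically followed by
#   at_fn_check_status 0 $at_status "$at_srcdir/foo.at:12"
# An actual status of 77 skips the rest of the group, 99 is treated as a hard
# failure, and any other mismatch reports the expected and actual exit codes
# and marks the group as failed.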
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_diff_devnull], [FILE],
|
||
[Emit a diff between /dev/null and FILE. Uses "test -s" to avoid useless
|
||
diff invocations.])
|
||
at_fn_diff_devnull ()
|
||
{
|
||
test -s "$[1]" || return 0
|
||
$at_diff "$at_devnull" "$[1]"
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_test], [NUMBER],
|
||
[Parse out test NUMBER from the tail of this file.])
|
||
at_fn_test ()
|
||
{
|
||
eval at_sed=\$at_sed$[1]
|
||
sed "$at_sed" "$at_myself" > "$at_test_source"
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_create_debugging_script], [],
|
||
[Create the debugging script $at_group_dir/run which will reproduce the
|
||
current test group.])
|
||
at_fn_create_debugging_script ()
|
||
{
|
||
{
|
||
echo "#! /bin/sh" &&
|
||
echo 'test "${ZSH_VERSION+set}" = set dnl
|
||
&& alias -g '\''${1+"$[@]"}'\''='\''"$[@]"'\''' &&
|
||
AS_ECHO(["cd '$at_dir'"]) &&
|
||
AS_ECHO(["exec \${CONFIG_SHELL-$SHELL} \"$at_myself\" -v -d ]dnl
|
||
[$at_debug_args $at_group \${1+\"\$[@]\"}"]) &&
|
||
echo 'exit 1'
|
||
} >"$at_group_dir/run" &&
|
||
chmod +x "$at_group_dir/run"
|
||
}
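
# For illustration only (hypothetical paths and group number): for group 5 of
# a suite run from /tmp/pkg/tests, the generated debugging script is roughly
#   #! /bin/sh
#   cd '/tmp/pkg/tests'
#   exec ${CONFIG_SHELL-/bin/sh} "/tmp/pkg/tests/testsuite" -v -d 5
#   exit 1
# so the single failing group can be replayed verbosely without cleanup.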
|
||
|
||
m4_text_box([End of autotest shell functions.])
|
||
m4_divert_pop([PREPARE_TESTS])dnl back to DEFAULTS
|
||
|
||
# Not all shells have the 'times' builtin; the subshell is needed to make
|
||
# sure we discard the 'times: not found' message from the shell.
|
||
at_times_p=false
|
||
(times) >/dev/null 2>&1 && at_times_p=:
|
||
|
||
# CLI Arguments to pass to the debugging scripts.
|
||
at_debug_args=
|
||
# -e sets to true
|
||
at_errexit_p=false
|
||
# Shall we be verbose? ':' means no, empty means yes.
|
||
at_verbose=:
|
||
at_quiet=
|
||
# Number of jobs to run in parallel; 0 means as many as there are test groups.
at_jobs=1
|
||
at_traceon=:
|
||
at_trace_echo=:
|
||
at_check_filter_trace=:
|
||
|
||
# Shall we keep the debug scripts? Must be `:' when the suite is
|
||
# run by a debug script, so that the script doesn't remove itself.
|
||
at_debug_p=false
|
||
# Display help message?
|
||
at_help_p=false
|
||
# Display the version message?
|
||
at_version_p=false
|
||
# List test groups?
|
||
at_list_p=false
|
||
# --clean
|
||
at_clean=false
|
||
# Test groups to run
|
||
at_groups=
|
||
# Whether to rerun failed tests.
|
||
at_recheck=
|
||
# Whether a write failure occurred
|
||
at_write_fail=0
|
||
|
||
# The directory we run the suite in. Default to . if no -C option.
|
||
at_dir=`pwd`
|
||
# An absolute reference to this testsuite script.
|
||
dnl m4-double quote, to preserve []
|
||
[case $as_myself in
|
||
[\\/]* | ?:[\\/]* ) at_myself=$as_myself ;;
|
||
* ) at_myself=$at_dir/$as_myself ;;
|
||
esac]
|
||
# Whether -C is in effect.
|
||
at_change_dir=false
|
||
m4_divert_pop([DEFAULTS])dnl
|
||
m4_define([_AT_FINISH],
|
||
[m4_ifdef([AT_ingroup], [m4_fatal([missing AT_CLEANUP detected])])dnl
|
||
m4_divert_text([DEFAULTS],
|
||
[
|
||
# Whether to enable colored test results.
|
||
at_color=m4_ifdef([AT_color], [AT_color], [no])
|
||
# List of the tested programs.
|
||
at_tested='m4_ifdef([AT_tested],
|
||
[m4_translit(m4_dquote(m4_defn([AT_tested])), [ ], m4_newline)])'
|
||
# As many question marks as there are digits in the last test group number.
|
||
# Used to normalize the test group numbers so that `ls' lists them in
|
||
# numerical order.
|
||
at_format='m4_bpatsubst(m4_defn([AT_ordinal]), [.], [?])'
|
||
# Description of all the test groups.
|
||
at_help_all="AS_ESCAPE(m4_dquote(m4_defn([AT_help_all])))"
|
||
# List of all the test groups.
at_groups_all=`AS_ECHO(["$at_help_all"]) | sed 's/;.*//'`
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_validate_ranges], [NAME...],
|
||
[Validate and normalize the test group number contained in each
|
||
variable NAME. Leading zeroes are treated as decimal.])
|
||
at_fn_validate_ranges ()
|
||
{
|
||
for at_grp
|
||
do
|
||
eval at_value=\$$at_grp
|
||
if test $at_value -lt 1 || test $at_value -gt AT_ordinal; then
|
||
AS_ECHO(["invalid test group: $at_value"]) >&2
|
||
exit 1
|
||
fi
|
||
case $at_value in
|
||
0*) # We want to treat leading 0 as decimal, like expr and test, but
|
||
# AS_VAR_ARITH treats it as octal if it uses $(( )).
|
||
# With XSI shells, ${at_value#${at_value%%[1-9]*}} avoids the
|
||
# expr fork, but it is not worth the effort to determine if the
|
||
# shell supports XSI when the user can just avoid leading 0.
|
||
eval $at_grp='`expr $at_value + 0`' ;;
|
||
esac
|
||
done
|
||
}])])dnl
|
||
m4_divert_push([PARSE_ARGS])dnl
|
||
|
||
at_prev=
|
||
for at_option
|
||
do
|
||
# If the previous option needs an argument, assign it.
|
||
if test -n "$at_prev"; then
|
||
at_option=$at_prev=$at_option
|
||
at_prev=
|
||
fi
|
||
|
||
case $at_option in
|
||
*=?*) at_optarg=`expr "X$at_option" : '[[^=]]*=\(.*\)'` ;;
|
||
*) at_optarg= ;;
|
||
esac
|
||
|
||
# Accept the important Cygnus configure options, so we can diagnose typos.
|
||
|
||
case $at_option in
|
||
--help | -h )
|
||
at_help_p=:
|
||
;;
|
||
|
||
--list | -l )
|
||
at_list_p=:
|
||
;;
|
||
|
||
--version | -V )
|
||
at_version_p=:
|
||
;;
|
||
|
||
--clean | -c )
|
||
at_clean=:
|
||
;;
|
||
|
||
--color )
|
||
at_color=always
|
||
;;
|
||
--color=* )
|
||
case $at_optarg in
|
||
no | never | none) at_color=never ;;
|
||
auto | tty | if-tty) at_color=auto ;;
|
||
always | yes | force) at_color=always ;;
|
||
*) at_optname=`echo " $at_option" | sed 's/^ //; s/=.*//'`
|
||
AS_ERROR([unrecognized argument to $at_optname: $at_optarg]) ;;
|
||
esac
|
||
;;
|
||
|
||
--debug | -d )
|
||
at_debug_p=:
|
||
;;
|
||
|
||
--errexit | -e )
|
||
at_debug_p=:
|
||
at_errexit_p=:
|
||
;;
|
||
|
||
--verbose | -v )
|
||
at_verbose=; at_quiet=:
|
||
;;
|
||
|
||
--trace | -x )
|
||
at_traceon='set -x'
|
||
at_trace_echo=echo
|
||
at_check_filter_trace=at_fn_filter_trace
|
||
;;
|
||
|
||
[[0-9] | [0-9][0-9] | [0-9][0-9][0-9] | [0-9][0-9][0-9][0-9]])
|
||
at_fn_validate_ranges at_option
|
||
AS_VAR_APPEND([at_groups], ["$at_option$as_nl"])
|
||
;;
|
||
|
||
# Ranges
|
||
[[0-9]- | [0-9][0-9]- | [0-9][0-9][0-9]- | [0-9][0-9][0-9][0-9]-])
|
||
at_range_start=`echo $at_option |tr -d X-`
|
||
at_fn_validate_ranges at_range_start
|
||
at_range=`AS_ECHO(["$at_groups_all"]) | \
|
||
sed -ne '/^'$at_range_start'$/,$p'`
|
||
AS_VAR_APPEND([at_groups], ["$at_range$as_nl"])
|
||
;;
|
||
|
||
[-[0-9] | -[0-9][0-9] | -[0-9][0-9][0-9] | -[0-9][0-9][0-9][0-9]])
|
||
at_range_end=`echo $at_option |tr -d X-`
|
||
at_fn_validate_ranges at_range_end
|
||
at_range=`AS_ECHO(["$at_groups_all"]) | \
|
||
sed -ne '1,/^'$at_range_end'$/p'`
|
||
AS_VAR_APPEND([at_groups], ["$at_range$as_nl"])
|
||
;;
|
||
|
||
[[0-9]-[0-9] | [0-9]-[0-9][0-9] | [0-9]-[0-9][0-9][0-9]] | \
|
||
[[0-9]-[0-9][0-9][0-9][0-9] | [0-9][0-9]-[0-9][0-9]] | \
|
||
[[0-9][0-9]-[0-9][0-9][0-9] | [0-9][0-9]-[0-9][0-9][0-9][0-9]] | \
|
||
[[0-9][0-9][0-9]-[0-9][0-9][0-9]] | \
|
||
[[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]] | \
|
||
[[0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]] )
|
||
at_range_start=`expr $at_option : '\(.*\)-'`
|
||
at_range_end=`expr $at_option : '.*-\(.*\)'`
|
||
if test $at_range_start -gt $at_range_end; then
|
||
at_tmp=$at_range_end
|
||
at_range_end=$at_range_start
|
||
at_range_start=$at_tmp
|
||
fi
|
||
at_fn_validate_ranges at_range_start at_range_end
|
||
at_range=`AS_ECHO(["$at_groups_all"]) | \
|
||
sed -ne '/^'$at_range_start'$/,/^'$at_range_end'$/p'`
|
||
AS_VAR_APPEND([at_groups], ["$at_range$as_nl"])
|
||
;;
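
  # For illustration only (hypothetical suite of 400 groups): the selections
  # "5", "3-7", "390-" and "-4" are each validated by at_fn_validate_ranges
  # and expanded against $at_groups_all, so "3-7" runs groups 3 through 7 and
  # "390-" runs group 390 up to the last one; duplicates are removed and the
  # list is sorted once option parsing is finished.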
|
||
|
||
# Directory selection.
|
||
--directory | -C )
|
||
at_prev=--directory
|
||
;;
|
||
--directory=* )
|
||
at_change_dir=:
|
||
at_dir=$at_optarg
|
||
if test x- = "x$at_dir" ; then
|
||
at_dir=./-
|
||
fi
|
||
;;
|
||
|
||
# Parallel execution.
|
||
--jobs | -j )
|
||
at_jobs=0
|
||
;;
|
||
--jobs=* | -j[[0-9]]* )
|
||
if test -n "$at_optarg"; then
|
||
at_jobs=$at_optarg
|
||
else
|
||
at_jobs=`expr X$at_option : 'X-j\(.*\)'`
|
||
fi
|
||
case $at_jobs in *[[!0-9]]*)
|
||
at_optname=`echo " $at_option" | sed 's/^ //; s/[[0-9=]].*//'`
|
||
AS_ERROR([non-numeric argument to $at_optname: $at_jobs]) ;;
|
||
esac
|
||
;;
|
||
|
||
# Keywords.
|
||
--keywords | -k )
|
||
at_prev=--keywords
|
||
;;
|
||
--keywords=* )
|
||
at_groups_selected=$at_help_all
|
||
at_save_IFS=$IFS
|
||
IFS=,
|
||
set X $at_optarg
|
||
shift
|
||
IFS=$at_save_IFS
|
||
for at_keyword
|
||
do
|
||
at_invert=
|
||
case $at_keyword in
|
||
'!'*)
|
||
at_invert="-v"
|
||
at_keyword=`expr "X$at_keyword" : 'X!\(.*\)'`
|
||
;;
|
||
esac
|
||
# We deliberately match the test group titles too.
at_groups_selected=`AS_ECHO(["$at_groups_selected"]) |
|
||
grep -i $at_invert ["^[1-9][^;]*;.*[; ]$at_keyword[ ;]"]`
|
||
done
|
||
# Smash the keywords.
|
||
at_groups_selected=`AS_ECHO(["$at_groups_selected"]) | sed 's/;.*//'`
|
||
AS_VAR_APPEND([at_groups], ["$at_groups_selected$as_nl"])
|
||
;;
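
  # For illustration only (hypothetical keywords): within one --keywords
  # option the comma-separated keywords must all match, so
  #   ./testsuite -k 'autotools,!slow'
  # selects the groups whose title or keyword list matches "autotools" but
  # not "slow"; giving several -k options accumulates the groups each selects.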
|
||
--recheck)
|
||
at_recheck=:
|
||
;;
|
||
m4_divert_pop([PARSE_ARGS])dnl
|
||
dnl Process *=* last to allow for user specified --option=* type arguments.
|
||
m4_divert_push([PARSE_ARGS_END])dnl
|
||
|
||
*=*)
|
||
at_envvar=`expr "x$at_option" : 'x\([[^=]]*\)='`
|
||
# Reject names that are not valid shell variable names.
|
||
case $at_envvar in
|
||
'' | [[0-9]]* | *[[!_$as_cr_alnum]]* )
|
||
AS_ERROR([invalid variable name: `$at_envvar']) ;;
|
||
esac
|
||
at_value=`AS_ECHO(["$at_optarg"]) | sed "s/'/'\\\\\\\\''/g"`
|
||
# Export now, but save eval for later and for debug scripts.
|
||
export $at_envvar
|
||
AS_VAR_APPEND([at_debug_args], [" $at_envvar='$at_value'"])
|
||
;;
|
||
|
||
*) AS_ECHO(["$as_me: invalid option: $at_option"]) >&2
|
||
AS_ECHO(["Try \`$[0] --help' for more information."]) >&2
|
||
exit 1
|
||
;;
|
||
esac
|
||
done
|
||
|
||
# Verify our last option didn't require an argument
|
||
AS_IF([test -n "$at_prev"], [AS_ERROR([`$at_prev' requires an argument])])
|
||
|
||
# The file containing the suite.
|
||
at_suite_log=$at_dir/$as_me.log
|
||
|
||
# Selected test groups.
|
||
if test -z "$at_groups$at_recheck"; then
|
||
at_groups=$at_groups_all
|
||
else
|
||
if test -n "$at_recheck" && test -r "$at_suite_log"; then
|
||
at_oldfails=`sed -n ['
|
||
/^Failed tests:$/,/^Skipped tests:$/{
|
||
s/^[ ]*\([1-9][0-9]*\):.*/\1/p
|
||
}
|
||
/^Unexpected passes:$/,/^## Detailed failed tests/{
|
||
s/^[ ]*\([1-9][0-9]*\):.*/\1/p
|
||
}
|
||
/^## Detailed failed tests/q
|
||
'] "$at_suite_log"`
|
||
AS_VAR_APPEND([at_groups], ["$at_oldfails$as_nl"])
|
||
fi
|
||
# Sort the tests, removing duplicates.
|
||
at_groups=`AS_ECHO(["$at_groups"]) | sort -nu | sed '/^$/d'`
|
||
fi
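
# For illustration only (hypothetical log excerpt): with --recheck the group
# numbers are recovered from the previous run's log, whose summary contains
# lines such as
#   Failed tests:
#      7: foo.at:23      some failing group
#   Unexpected passes:
#     12: bar.at:5       some xpassing group
# from which the sed script above extracts 7 and 12 and appends them to
# $at_groups, which is then sorted and deduplicated.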
|
||
|
||
if test x"$at_color" = xalways \
|
||
|| { test x"$at_color" = xauto && test -t 1; }; then
|
||
at_red=`printf '\033@<:@0;31m'`
|
||
at_grn=`printf '\033@<:@0;32m'`
|
||
at_lgn=`printf '\033@<:@1;32m'`
|
||
at_blu=`printf '\033@<:@1;34m'`
|
||
at_std=`printf '\033@<:@m'`
|
||
else
|
||
at_red= at_grn= at_lgn= at_blu= at_std=
|
||
fi
|
||
m4_divert_pop([PARSE_ARGS_END])dnl
|
||
m4_divert_push([HELP])dnl
|
||
|
||
# Help message.
|
||
if $at_help_p; then
|
||
cat <<_ATEOF || at_write_fail=1
|
||
Usage: $[0] [[OPTION]... [VARIABLE=VALUE]... [TESTS]]
|
||
|
||
Run all the tests, or the selected TESTS, given by numeric ranges, and
|
||
save a detailed log file. Upon failure, create debugging scripts.
|
||
|
||
Do not change environment variables directly. Instead, set them via
|
||
command line arguments. Set \`AUTOTEST_PATH' to select the executables
|
||
to exercise. Each relative directory is expanded as build and source
|
||
directories relative to the top level of this distribution.
|
||
E.g., from within the build directory /tmp/foo-1.0, invoking this:
|
||
|
||
$ $[0] AUTOTEST_PATH=bin
|
||
|
||
is equivalent to the following, assuming the source directory is /src/foo-1.0:
|
||
|
||
PATH=/tmp/foo-1.0/bin:/src/foo-1.0/bin:\$PATH $[0]
|
||
_ATEOF
|
||
m4_divert_pop([HELP])dnl
|
||
m4_divert_push([HELP_MODES])dnl
|
||
cat <<_ATEOF || at_write_fail=1
|
||
|
||
Operation modes:
|
||
-h, --help print the help message, then exit
|
||
-V, --version print version number, then exit
|
||
-c, --clean remove all the files this test suite might create and exit
|
||
-l, --list describe all the tests, or the selected TESTS
_ATEOF
|
||
m4_divert_pop([HELP_MODES])dnl
|
||
m4_wrap([m4_divert_push([HELP_TUNING_BEGIN])dnl
|
||
cat <<_ATEOF || at_write_fail=1
|
||
|
||
dnl extra quoting prevents emacs whitespace mode from putting tabs in output
|
||
Execution tuning:
|
||
-C, --directory=DIR
|
||
[ change to directory DIR before starting]
|
||
--color[[=never|auto|always]]
|
||
[ ]m4_ifdef([AT_color],
|
||
[disable colored test results, or enable even without terminal],
|
||
[enable colored test results on terminal, or always])
|
||
-j, --jobs[[=N]]
|
||
[ allow N jobs at once; infinite jobs with no arg (default 1)]
-k, --keywords=KEYWORDS
|
||
[ select the tests matching all the comma-separated KEYWORDS]
|
||
[ multiple \`-k' accumulate; prefixed \`!' negates a KEYWORD]
|
||
--recheck select all tests that failed or passed unexpectedly last time
|
||
-e, --errexit abort as soon as a test fails; implies --debug
|
||
-v, --verbose force more detailed output
|
||
[ default for debugging scripts]
|
||
-d, --debug inhibit clean up and top-level logging
|
||
[ default for debugging scripts]
|
||
-x, --trace enable shell tracing of the tests
_ATEOF
|
||
m4_divert_pop([HELP_TUNING_BEGIN])])dnl
|
||
m4_divert_push([HELP_END])dnl
|
||
cat <<_ATEOF || at_write_fail=1
|
||
|
||
Report bugs to <AT_PACKAGE_BUGREPORT>.dnl
|
||
m4_ifdef([AT_PACKAGE_NAME],
|
||
[m4_ifset([AT_PACKAGE_URL], [
|
||
m4_defn([AT_PACKAGE_NAME]) home page: <AT_PACKAGE_URL>.])dnl
|
||
m4_if(m4_index(m4_defn([AT_PACKAGE_NAME]), [GNU ]), [0], [
|
||
General help using GNU software: <http://www.gnu.org/gethelp/>.])])
|
||
_ATEOF
|
||
exit $at_write_fail
|
||
fi
|
||
|
||
# List of tests.
|
||
if $at_list_p; then
|
||
cat <<_ATEOF || at_write_fail=1
|
||
AT_TESTSUITE_NAME test groups:
|
||
|
||
NUM: FILE-NAME:LINE TEST-GROUP-NAME
|
||
KEYWORDS
|
||
|
||
_ATEOF
|
||
# Pass an empty line as separator between selected groups and help.
|
||
AS_ECHO(["$at_groups$as_nl$as_nl$at_help_all"]) |
|
||
awk 'NF == 1 && FS != ";" {
|
||
selected[[$ 1]] = 1
|
||
next
|
||
}
|
||
/^$/ { FS = ";" }
|
||
NF > 0 {
|
||
if (selected[[$ 1]]) {
|
||
printf " %3d: %-18s %s\n", $ 1, $ 2, $ 3
|
||
if ($ 4) {
|
||
lmax = 79
|
||
indent = " "
|
||
line = indent
|
||
len = length (line)
|
||
n = split ($ 4, a, " ")
|
||
for (i = 1; i <= n; i++) {
|
||
l = length (a[[i]]) + 1
|
||
if (i > 1 && len + l > lmax) {
|
||
print line
|
||
line = indent " " a[[i]]
|
||
len = length (line)
|
||
} else {
|
||
line = line " " a[[i]]
|
||
len += l
|
||
}
|
||
}
|
||
if (n)
|
||
print line
|
||
}
|
||
}
|
||
}' || at_write_fail=1
|
||
exit $at_write_fail
|
||
fi
|
||
m4_divert_pop([HELP_END])dnl
|
||
m4_divert_push([VERSION])dnl
|
||
if $at_version_p; then
|
||
AS_ECHO(["$as_me (AT_PACKAGE_STRING)"]) &&
|
||
cat <<\_ATEOF || at_write_fail=1
|
||
m4_divert_pop([VERSION])dnl
|
||
m4_divert_push([VERSION_END])dnl
|
||
_ATEOF
|
||
exit $at_write_fail
|
||
fi
|
||
m4_divert_pop([VERSION_END])dnl
|
||
m4_divert_push([TESTS_BEGIN])dnl
|
||
|
||
# Take any -C into account.
|
||
if $at_change_dir ; then
|
||
test x != "x$at_dir" && cd "$at_dir" \
|
||
|| AS_ERROR([unable to change directory])
|
||
at_dir=`pwd`
|
||
fi
|
||
|
||
# Load the config files for any default variable assignments.
|
||
for at_file in atconfig atlocal
|
||
do
|
||
test -r $at_file || continue
|
||
. ./$at_file || AS_ERROR([invalid content: $at_file])
|
||
done
|
||
|
||
# Autoconf <=2.59b set at_top_builddir instead of at_top_build_prefix:
|
||
: "${at_top_build_prefix=$at_top_builddir}"
|
||
|
||
# Perform any assignments requested during argument parsing.
|
||
eval "$at_debug_args"
|
||
|
||
# atconfig delivers names relative to the directory the test suite is
|
||
# in, but the groups themselves are run in testsuite-dir/group-dir.
|
||
if test -n "$at_top_srcdir"; then
|
||
builddir=../..
|
||
for at_dir_var in srcdir top_srcdir top_build_prefix
|
||
do
|
||
AS_VAR_COPY([at_val], [at_$at_dir_var])
|
||
case $at_val in
|
||
[[\\/$]]* | ?:[[\\/]]* ) at_prefix= ;;
|
||
*) at_prefix=../../ ;;
|
||
esac
|
||
AS_VAR_SET([$at_dir_var], [$at_prefix$at_val])
|
||
done
|
||
fi
|
||
|
||
m4_text_box([Directory structure.])
|
||
|
||
# This is the set of directories and files used by this script
|
||
# (non-literals are capitalized):
|
||
#
|
||
# TESTSUITE - the testsuite
|
||
# TESTSUITE.log - summarizes the complete testsuite run
|
||
# TESTSUITE.dir/ - created during a run, remains after -d or failed test
|
||
# + at-groups/ - during a run: status of all groups in run
|
||
# | + NNN/ - during a run: meta-data about test group NNN
|
||
# | | + check-line - location (source file and line) of current AT_CHECK
|
||
# | | + status - exit status of current AT_CHECK
|
||
# | | + stdout - stdout of current AT_CHECK
|
||
# | | + stder1 - stderr, including trace
|
||
# | | + stderr - stderr, with trace filtered out
|
||
# | | + test-source - portion of testsuite that defines group
|
||
# | | + times - timestamps for computing duration
|
||
# | | + pass - created if group passed
|
||
# | | + xpass - created if group xpassed
|
||
# | | + fail - created if group failed
|
||
# | | + xfail - created if group xfailed
|
||
# | | + skip - created if group skipped
|
||
# + at-stop - during a run: end the run if this file exists
|
||
# + at-source-lines - during a run: cache of TESTSUITE line numbers for extraction
|
||
# + 0..NNN/ - created for each group NNN, remains after -d or failed test
|
||
# | + TESTSUITE.log - summarizes the group results
|
||
# | + ... - files created during the group
|
||
|
||
# The directory the whole suite works in.
|
||
# Should be absolute to let the user `cd' at will.
|
||
at_suite_dir=$at_dir/$as_me.dir
|
||
# The file containing the suite ($at_dir might have changed since earlier).
|
||
at_suite_log=$at_dir/$as_me.log
|
||
# The directory containing helper files per test group.
|
||
at_helper_dir=$at_suite_dir/at-groups
|
||
# Stop file: if it exists, do not start new jobs.
|
||
at_stop_file=$at_suite_dir/at-stop
|
||
# The fifo used for the job dispatcher.
|
||
at_job_fifo=$at_suite_dir/at-job-fifo
|
||
|
||
if $at_clean; then
|
||
test -d "$at_suite_dir" &&
|
||
find "$at_suite_dir" -type d ! -perm -700 -exec chmod u+rwx \{\} \;
|
||
rm -f -r "$at_suite_dir" "$at_suite_log"
|
||
exit $?
|
||
fi
|
||
|
||
# Don't take risks: use only absolute directories in PATH.
|
||
#
|
||
# For stand-alone test suites (i.e. atconfig was not found),
# AUTOTEST_PATH is relative to `.'.
|
||
#
|
||
# For embedded test suites, AUTOTEST_PATH is relative to the top level
|
||
# of the package. Then expand it into build/src parts, since users
|
||
# may create executables in both places.
|
||
AUTOTEST_PATH=`AS_ECHO(["$AUTOTEST_PATH"]) | sed "s|:|$PATH_SEPARATOR|g"`
|
||
at_path=
|
||
_AS_PATH_WALK([$AUTOTEST_PATH $PATH],
|
||
[test -n "$at_path" && AS_VAR_APPEND([at_path], [$PATH_SEPARATOR])
|
||
case $as_dir in
|
||
[[\\/]]* | ?:[[\\/]]* )
|
||
AS_VAR_APPEND([at_path], ["$as_dir"])
|
||
;;
|
||
* )
|
||
if test -z "$at_top_build_prefix"; then
|
||
# Stand-alone test suite.
|
||
AS_VAR_APPEND([at_path], ["$as_dir"])
|
||
else
|
||
# Embedded test suite.
|
||
AS_VAR_APPEND([at_path], ["$at_top_build_prefix$as_dir$PATH_SEPARATOR"])
|
||
AS_VAR_APPEND([at_path], ["$at_top_srcdir/$as_dir"])
|
||
fi
|
||
;;
|
||
esac])
|
||
|
||
# Now build and simplify PATH.
|
||
#
|
||
# There might be directories that don't exist, but don't redirect
|
||
# builtins' (e.g., cd) stderr directly: Ultrix's sh hates that.
at_new_path=
|
||
_AS_PATH_WALK([$at_path],
|
||
[test -d "$as_dir" || continue
|
||
case $as_dir in
|
||
[[\\/]]* | ?:[[\\/]]* ) ;;
|
||
* ) as_dir=`(cd "$as_dir" && pwd) 2>/dev/null` ;;
|
||
esac
|
||
case $PATH_SEPARATOR$at_new_path$PATH_SEPARATOR in
|
||
*$PATH_SEPARATOR$as_dir$PATH_SEPARATOR*) ;;
|
||
$PATH_SEPARATOR$PATH_SEPARATOR) at_new_path=$as_dir ;;
|
||
*) AS_VAR_APPEND([at_new_path], ["$PATH_SEPARATOR$as_dir"]) ;;
|
||
esac])
|
||
PATH=$at_new_path
|
||
export PATH
|
||
|
||
# Setting up the FDs.
|
||
m4_define([AS_MESSAGE_LOG_FD], [5])
|
||
dnl The parent needs two fds to the same fifo, otherwise, there is a race
|
||
dnl where the parent can read the fifo before a child opens it for writing
|
||
m4_define([AT_JOB_FIFO_IN_FD], [6])
|
||
m4_define([AT_JOB_FIFO_OUT_FD], [7])
|
||
[#] AS_MESSAGE_LOG_FD is the log file. Not to be overwritten if `-d'.
|
||
if $at_debug_p; then
|
||
at_suite_log=/dev/null
|
||
else
|
||
: >"$at_suite_log"
|
||
fi
|
||
exec AS_MESSAGE_LOG_FD>>"$at_suite_log"
|
||
|
||
# Banners and logs.
|
||
AS_BOX(m4_defn([AT_TESTSUITE_NAME])[.])
|
||
{
|
||
AS_BOX(m4_defn([AT_TESTSUITE_NAME])[.])
|
||
echo
|
||
|
||
AS_ECHO(["$as_me: command line was:"])
|
||
AS_ECHO([" \$ $[0] $at_cli_args"])
|
||
echo
|
||
|
||
# If ChangeLog exists, list a few lines in case it might help determine
# the exact version.
if test -n "$at_top_srcdir" && test -f "$at_top_srcdir/ChangeLog"; then
|
||
AS_BOX([ChangeLog.])
|
||
echo
|
||
sed 's/^/| /;10q' "$at_top_srcdir/ChangeLog"
|
||
echo
|
||
fi
|
||
|
||
AS_UNAME
|
||
echo
|
||
|
||
# Contents of the config files.
|
||
for at_file in atconfig atlocal
|
||
do
|
||
test -r $at_file || continue
|
||
AS_ECHO(["$as_me: $at_file:"])
|
||
sed 's/^/| /' $at_file
|
||
echo
|
||
done
|
||
} >&AS_MESSAGE_LOG_FD
|
||
|
||
m4_divert_pop([TESTS_BEGIN])dnl
|
||
m4_divert_push([PREPARE_TESTS])dnl
|
||
{
|
||
AS_BOX([Tested programs.])
|
||
echo
|
||
} >&AS_MESSAGE_LOG_FD
|
||
|
||
# Report what programs are being tested.
|
||
for at_program in : $at_tested
|
||
do
|
||
test "$at_program" = : && continue
|
||
case $at_program in
|
||
[[\\/]* | ?:[\\/]* ) $at_program_=$at_program ;;]
|
||
* )
|
||
_AS_PATH_WALK([$PATH], [test -f "$as_dir/$at_program" && break])
|
||
at_program_=$as_dir/$at_program ;;
|
||
esac
|
||
if test -f "$at_program_"; then
|
||
{
|
||
AS_ECHO(["$at_srcdir/AT_LINE: $at_program_ --version"])
|
||
"$at_program_" --version </dev/null
|
||
echo
|
||
} >&AS_MESSAGE_LOG_FD 2>&1
|
||
else
|
||
AS_ERROR([cannot find $at_program])
|
||
fi
|
||
done
|
||
|
||
{
|
||
AS_BOX([Running the tests.])
|
||
} >&AS_MESSAGE_LOG_FD
|
||
|
||
at_start_date=`date`
|
||
at_start_time=`date +%s 2>/dev/null`
|
||
AS_ECHO(["$as_me: starting at: $at_start_date"]) >&AS_MESSAGE_LOG_FD
|
||
m4_divert_pop([PREPARE_TESTS])dnl
|
||
m4_divert_push([TESTS])dnl
|
||
|
||
# Create the master directory if it doesn't already exist.
|
||
AS_MKDIR_P(["$at_suite_dir"]) ||
|
||
AS_ERROR([cannot create `$at_suite_dir'])
|
||
|
||
# Can we diff with `/dev/null'? DU 5.0 refuses.
|
||
if diff /dev/null /dev/null >/dev/null 2>&1; then
|
||
at_devnull=/dev/null
|
||
else
|
||
at_devnull=$at_suite_dir/devnull
|
||
>"$at_devnull"
|
||
fi
|
||
|
||
# Use `diff -u' when possible.
|
||
if at_diff=`diff -u "$at_devnull" "$at_devnull" 2>&1` && test -z "$at_diff"
|
||
then
|
||
at_diff='diff -u'
|
||
else
|
||
at_diff=diff
|
||
fi
|
||
|
||
# Get the last needed group.
|
||
for at_group in : $at_groups; do :; done
|
||
|
||
# Extract the start and end lines of each test group at the tail
|
||
# of this file
|
||
awk '
|
||
BEGIN { FS="" }
|
||
/^@%:@AT_START_/ {
|
||
start = NR
|
||
}
|
||
/^@%:@AT_STOP_/ {
|
||
test = substr ($ 0, 10)
|
||
print "at_sed" test "=\"1," start "d;" (NR-1) "q\""
|
||
if (test == "'"$at_group"'") exit
|
||
}' "$at_myself" > "$at_suite_dir/at-source-lines" &&
|
||
. "$at_suite_dir/at-source-lines" ||
|
||
AS_ERROR([cannot create test line number cache])
|
||
rm -f "$at_suite_dir/at-source-lines"
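
# For illustration only (hypothetical line numbers): if the marker lines that
# open and close group 3 sit at lines 2210 and 2290 of this script, the cache
# built above amounts to
#   at_sed3="1,2210d;2289q"
# and at_fn_test later hands that expression to sed to slice the group's code
# out of this script into its test-source file.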
|
||
|
||
# Set number of jobs for `-j'; avoid more jobs than test groups.
|
||
set X $at_groups; shift; at_max_jobs=$[@%:@]
|
||
if test $at_max_jobs -eq 0; then
|
||
at_jobs=1
|
||
fi
|
||
if test $at_jobs -ne 1 &&
|
||
{ test $at_jobs -eq 0 || test $at_jobs -gt $at_max_jobs; }; then
|
||
at_jobs=$at_max_jobs
|
||
fi
|
||
|
||
# In parallel mode, don't output banners and don't split summary lines.
if test $at_jobs -ne 1; then
|
||
at_print_banners=false
|
||
at_quiet=:
|
||
fi
|
||
|
||
# Set up helper dirs.
|
||
rm -rf "$at_helper_dir" &&
|
||
mkdir "$at_helper_dir" &&
|
||
cd "$at_helper_dir" &&
|
||
{ test -z "$at_groups" || mkdir $at_groups; } ||
|
||
AS_ERROR([testsuite directory setup failed])
|
||
|
||
# Functions for running a test group. We leave the actual
|
||
# test group execution outside of a shell function in order
|
||
# to avoid hitting zsh 4.x exit status bugs.
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_group_prepare], [],
|
||
[Prepare for running a test group.])
|
||
at_fn_group_prepare ()
|
||
{
|
||
# The directory for additional per-group helper files.
|
||
at_job_dir=$at_helper_dir/$at_group
|
||
# The file containing the location of the last AT_CHECK.
|
||
at_check_line_file=$at_job_dir/check-line
|
||
# The file containing the exit status of the last command.
|
||
at_status_file=$at_job_dir/status
|
||
# The files containing the output of the tested commands.
|
||
at_stdout=$at_job_dir/stdout
|
||
at_stder1=$at_job_dir/stder1
|
||
at_stderr=$at_job_dir/stderr
|
||
# The file containing the code for a test group.
|
||
at_test_source=$at_job_dir/test-source
|
||
# The file containing dates.
|
||
at_times_file=$at_job_dir/times
|
||
|
||
# Be sure to come back to the top test directory.
|
||
cd "$at_suite_dir"
|
||
|
||
# Clearly separate the test groups when verbose.
|
||
$at_first || $at_verbose echo
|
||
|
||
at_group_normalized=$at_group
|
||
_AT_NORMALIZE_TEST_GROUP_NUMBER(at_group_normalized)
|
||
|
||
# Create a fresh directory for the next test group, and enter.
|
||
# If one already exists, the user may have invoked ./run from
|
||
# within that directory; we remove the contents, but not the
|
||
# directory itself, so that we aren't pulling the rug out from
|
||
# under the shell's notion of the current directory.
|
||
at_group_dir=$at_suite_dir/$at_group_normalized
|
||
at_group_log=$at_group_dir/$as_me.log
|
||
_AS_CLEAN_DIR("$at_group_dir") ||
|
||
AS_WARN([test directory for $at_group_normalized could not be cleaned])
|
||
# Be tolerant if the above `rm' was not able to remove the directory.
|
||
AS_MKDIR_P(["$at_group_dir"])
|
||
|
||
echo 0 > "$at_status_file"
|
||
|
||
# In verbose mode, append to the log file *and* show on
|
||
# the standard output; in quiet mode only write to the log.
|
||
if test -z "$at_verbose"; then
|
||
at_tee_pipe='tee -a "$at_group_log"'
|
||
else
|
||
at_tee_pipe='cat >> "$at_group_log"'
|
||
fi
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_group_banner], [[ORDINAL LINE DESC PAD [BANNER]]],
|
||
[Declare the test group ORDINAL, located at LINE with group description
|
||
DESC, and residing under BANNER. Use PAD to align the status column.])
|
||
at_fn_group_banner ()
|
||
{
|
||
at_setup_line="$[2]"
|
||
test -n "$[5]" && at_fn_banner $[5]
|
||
at_desc="$[3]"
|
||
case $[1] in
|
||
[[0-9]]) at_desc_line=" $[1]: ";;
|
||
[[0-9][0-9]]) at_desc_line=" $[1]: " ;;
|
||
[*]) at_desc_line="$[1]: " ;;
|
||
esac
|
||
AS_VAR_APPEND([at_desc_line], ["$[3]$[4]"])
|
||
$at_quiet AS_ECHO_N(["$at_desc_line"])
|
||
echo "# -*- compilation -*-" >> "$at_group_log"
|
||
}
|
||
|
||
AS_FUNCTION_DESCRIBE([at_fn_group_postprocess], [],
|
||
[Perform cleanup after running a test group.])
|
||
at_fn_group_postprocess ()
|
||
{
|
||
# Be sure to come back to the suite directory, in particular
|
||
# since below we might `rm' the group directory we are in currently.
|
||
cd "$at_suite_dir"
|
||
|
||
if test ! -f "$at_check_line_file"; then
|
||
sed "s/^ */$as_me: WARNING: /" <<_ATEOF
|
||
A failure happened in a test group before any test could be
run.  This means that the test suite is improperly designed.  Please
report this failure to <AT_PACKAGE_BUGREPORT>.
_ATEOF
|
||
AS_ECHO(["$at_setup_line"]) >"$at_check_line_file"
|
||
at_status=99
|
||
fi
|
||
$at_verbose AS_ECHO_N(["$at_group. $at_setup_line: "])
|
||
AS_ECHO_N(["$at_group. $at_setup_line: "]) >> "$at_group_log"
|
||
case $at_xfail:$at_status in
|
||
yes:0)
|
||
at_msg="UNEXPECTED PASS"
|
||
at_res=xpass
|
||
at_errexit=$at_errexit_p
|
||
at_color=$at_red
|
||
;;
|
||
no:0)
|
||
at_msg="ok"
|
||
at_res=pass
|
||
at_errexit=false
|
||
at_color=$at_grn
|
||
;;
|
||
*:77)
|
||
at_msg='skipped ('`cat "$at_check_line_file"`')'
|
||
at_res=skip
|
||
at_errexit=false
|
||
at_color=$at_blu
|
||
;;
|
||
no:* | *:99)
|
||
at_msg='FAILED ('`cat "$at_check_line_file"`')'
|
||
at_res=fail
|
||
at_errexit=$at_errexit_p
|
||
at_color=$at_red
|
||
;;
|
||
yes:*)
|
||
at_msg='expected failure ('`cat "$at_check_line_file"`')'
|
||
at_res=xfail
|
||
at_errexit=false
|
||
at_color=$at_lgn
|
||
;;
|
||
esac
|
||
echo "$at_res" > "$at_job_dir/$at_res"
|
||
# In parallel mode, output the summary line only afterwards.
|
||
if test $at_jobs -ne 1 && test -n "$at_verbose"; then
|
||
AS_ECHO(["$at_desc_line $at_color$at_msg$at_std"])
|
||
else
|
||
# Make sure there is a separator even with long titles.
|
||
AS_ECHO([" $at_color$at_msg$at_std"])
|
||
fi
|
||
at_log_msg="$at_group. $at_desc ($at_setup_line): $at_msg"
|
||
case $at_status in
|
||
0|77)
|
||
# $at_times_file is only available if the group succeeded.
|
||
# We're not including the group log, so the success message
|
||
# is written in the global log separately. But we also
|
||
# write to the group log in case they're using -d.
|
||
if test -f "$at_times_file"; then
|
||
at_log_msg="$at_log_msg ("`sed 1d "$at_times_file"`')'
|
||
rm -f "$at_times_file"
|
||
fi
|
||
AS_ECHO(["$at_log_msg"]) >> "$at_group_log"
|
||
AS_ECHO(["$at_log_msg"]) >&AS_MESSAGE_LOG_FD
|
||
|
||
# Clean up the group directory, unless the user wants the files
# or the success was unexpected.
|
||
if $at_debug_p || test $at_res = xpass; then
|
||
at_fn_create_debugging_script
|
||
if test $at_res = xpass && $at_errexit; then
|
||
echo stop > "$at_stop_file"
|
||
fi
|
||
else
|
||
if test -d "$at_group_dir"; then
|
||
find "$at_group_dir" -type d ! -perm -700 -exec chmod u+rwx \{\} \;
|
||
rm -fr "$at_group_dir"
|
||
fi
|
||
rm -f "$at_test_source"
|
||
fi
|
||
;;
|
||
*)
|
||
# Upon failure, include the log into the testsuite's global
|
||
# log. The failure message is written in the group log. It
|
||
# is later included in the global log.
|
||
AS_ECHO(["$at_log_msg"]) >> "$at_group_log"
|
||
|
||
# Upon failure, keep the group directory for autopsy, and create
|
||
# the debugging script. With -e, do not start any further tests.
|
||
at_fn_create_debugging_script
|
||
if $at_errexit; then
|
||
echo stop > "$at_stop_file"
|
||
fi
|
||
;;
|
||
esac
|
||
}
|
||
|
||
|
||
m4_text_box([Driver loop.])
|
||
|
||
dnl Catching signals correctly:
|
||
dnl
|
||
dnl The first idea was: trap the signal, send it to all spawned jobs,
|
||
dnl then reset the handler and reraise the signal for ourselves.
|
||
dnl However, before exiting, ksh will then send the signal to all
|
||
dnl process group members, potentially killing the outer testsuite
|
||
dnl and/or the 'make' process driving us.
|
||
dnl So now the strategy is: trap the signal, send it to all spawned jobs,
|
||
dnl then exit the script with the right status.
|
||
dnl
|
||
dnl In order to let the jobs know about the signal, we cannot just send it
|
||
dnl to the current process group (kill $SIG 0), for the same reason as above.
|
||
dnl Also, it does not reliably stop the suite to send the signal to the
|
||
dnl spawned processes, because they might not transport it further
|
||
dnl (maybe this can be fixed?).
|
||
dnl
|
||
dnl So what we do is enable shell job control if available, which causes the
|
||
dnl shell to start each parallel task as its own shell job, thus as a new
|
||
dnl process group leader. We then send the signal to all new process groups.
|
||
|
||
dnl Do we have job control?
|
||
if (set -m && set +m && set +b) >/dev/null 2>&1; then
|
||
set +b
|
||
at_job_control_on='set -m' at_job_control_off='set +m' at_job_group=-
|
||
else
|
||
at_job_control_on=: at_job_control_off=: at_job_group=
|
||
fi
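
# For illustration only (hypothetical process group id): with job control on,
# each spawned test group becomes its own process group, so group 1234 can be
# signaled as a whole with
#   kill -15 -1234
# which is what the handlers below do by prefixing $at_job_group (here "-")
# to every id collected from jobs -p.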
|
||
|
||
for at_signal in 1 2 15; do
|
||
dnl This signal handler is not suitable for PIPE: it causes writes.
|
||
dnl The code that was interrupted may have the errexit, monitor, or xtrace
|
||
dnl flags enabled, so sanitize.
|
||
trap 'set +x; set +e
|
||
$at_job_control_off
|
||
at_signal='"$at_signal"'
|
||
dnl Safety belt: even with runaway processes, prevent starting new jobs.
|
||
echo stop > "$at_stop_file"
|
||
dnl Do not enter this area multiple times, do not kill self prematurely.
|
||
trap "" $at_signal
|
||
dnl Gather process group IDs of currently running jobs.
|
||
at_pgids=
|
||
for at_pgid in `jobs -p 2>/dev/null`; do
|
||
at_pgids="$at_pgids $at_job_group$at_pgid"
|
||
done
|
||
dnl Ignore `kill' errors, as some jobs may have finished in the meantime.
|
||
test -z "$at_pgids" || kill -$at_signal $at_pgids 2>/dev/null
|
||
dnl wait until all jobs have exited.
|
||
wait
|
||
dnl Status output. Do this after waiting for the jobs, for ordered output.
|
||
dnl Avoid scribbling onto the end of a possibly incomplete line.
|
||
if test "$at_jobs" -eq 1 || test -z "$at_verbose"; then
|
||
echo >&2
|
||
fi
|
||
at_signame=`kill -l $at_signal 2>&1 || echo $at_signal`
|
||
set x $at_signame
|
||
test $# -gt 2 && at_signame=$at_signal
|
||
AS_WARN([caught signal $at_signame, bailing out])
|
||
dnl Do not reinstall the default handler here and reraise the signal to
|
||
dnl let the default handler do its job, see the note about ksh above.
|
||
dnl trap - $at_signal
|
||
dnl kill -$at_signal $$
|
||
dnl Instead, exit with appropriate status.
|
||
AS_VAR_ARITH([exit_status], [128 + $at_signal])
|
||
AS_EXIT([$exit_status])' $at_signal
|
||
done
|
||
|
||
rm -f "$at_stop_file"
|
||
at_first=:
|
||
|
||
if test $at_jobs -ne 1 &&
|
||
rm -f "$at_job_fifo" &&
|
||
test -n "$at_job_group" &&
|
||
( mkfifo "$at_job_fifo" && trap 'exit 1' PIPE STOP TSTP ) 2>/dev/null
|
||
then
|
||
# FIFO job dispatcher.
|
||
|
||
dnl Since we use job control, we need to propagate TSTP.
|
||
dnl This handler need not be used for serial execution.
|
||
dnl Again, we should stop all processes in the job groups, otherwise
|
||
dnl the stopping will not be effective while one test group is running.
|
||
dnl Apparently ksh does not honor the TSTP trap.
|
||
dnl As a safety measure, do not use the same variable names as in the
dnl termination handlers above; one might get called during execution
dnl of the other.
trap 'at_pids=
|
||
for at_pid in `jobs -p`; do
|
||
at_pids="$at_pids $at_job_group$at_pid"
|
||
done
|
||
dnl Send it to all spawned jobs, ignoring those finished meanwhile.
|
||
if test -n "$at_pids"; then
|
||
dnl Unfortunately, ksh93 fork-bombs when we send TSTP, so send STOP
|
||
dnl if this might be ksh (STOP prevents possible TSTP handlers inside
|
||
dnl AT_CHECKs from running). Then stop ourselves.
|
||
at_sig=TSTP
|
||
test "${TMOUT+set}" = set && at_sig=STOP
|
||
kill -$at_sig $at_pids 2>/dev/null
|
||
fi
|
||
kill -STOP $$
|
||
dnl We got a CONT, so let's go again. Passing this to all processes
|
||
dnl in the groups is necessary (because we stopped them), but it may
|
||
dnl cause changed test semantics; e.g., a sleep will be interrupted.
|
||
test -z "$at_pids" || kill -CONT $at_pids 2>/dev/null' TSTP
|
||
|
||
echo
|
||
# Turn jobs into a list of numbers, starting from 1.
|
||
at_joblist=`AS_ECHO(["$at_groups"]) | sed -n 1,${at_jobs}p`
|
||
|
||
set X $at_joblist
|
||
shift
|
||
for at_group in $at_groups; do
|
||
dnl Enable job control only for spawning the test group:
|
||
dnl Let the jobs run in separate process groups, but
dnl avoid all the status output by the shell.
|
||
$at_job_control_on 2>/dev/null
|
||
(
|
||
# Start one test group.
|
||
$at_job_control_off
|
||
dnl First child must open the fifo to avoid blocking parent; all other
|
||
dnl children inherit it already opened from the parent.
|
||
if $at_first; then
|
||
exec AT_JOB_FIFO_OUT_FD>"$at_job_fifo"
|
||
else
|
||
dnl Children do not need parent's copy of fifo.
|
||
exec AT_JOB_FIFO_IN_FD<&-
|
||
fi
|
||
dnl When a child receives PIPE, be sure to write back the token,
|
||
dnl so the master does not hang waiting for it.
|
||
dnl errexit and xtrace should not be set in this shell instance,
|
||
dnl except as debug measures. However, shells such as dash may
|
||
dnl optimize away the _AT_CHECK subshell, so normalize here.
|
||
trap 'set +x; set +e
|
||
dnl Ignore PIPE signals that stem from writing back the token.
|
||
trap "" PIPE
|
||
echo stop > "$at_stop_file"
|
||
echo >&AT_JOB_FIFO_OUT_FD
|
||
dnl Do not reraise the default PIPE handler.
|
||
dnl It wreaks havoc with ksh, see above.
|
||
dnl trap - 13
|
||
dnl kill -13 $$
|
||
AS_EXIT([141])' PIPE
|
||
at_fn_group_prepare
|
||
if cd "$at_group_dir" &&
|
||
at_fn_test $at_group &&
|
||
. "$at_test_source"
|
||
then :; else
|
||
AS_WARN([unable to parse test group: $at_group])
|
||
at_failed=:
|
||
fi
|
||
at_fn_group_postprocess
|
||
echo >&AT_JOB_FIFO_OUT_FD
|
||
) &
|
||
$at_job_control_off
|
||
if $at_first; then
|
||
at_first=false
|
||
exec AT_JOB_FIFO_IN_FD<"$at_job_fifo" AT_JOB_FIFO_OUT_FD>"$at_job_fifo"
|
||
fi
|
||
shift # Consume one token.
|
||
if test $[@%:@] -gt 0; then :; else
|
||
read at_token <&AT_JOB_FIFO_IN_FD || break
|
||
set x $[*]
|
||
fi
|
||
test -f "$at_stop_file" && break
|
||
done
|
||
exec AT_JOB_FIFO_OUT_FD>&-
|
||
# Read back the remaining ($at_jobs - 1) tokens.
|
||
set X $at_joblist
|
||
shift
|
||
if test $[@%:@] -gt 0; then
|
||
shift
|
||
for at_job
|
||
do
|
||
read at_token
|
||
done <&AT_JOB_FIFO_IN_FD
|
||
fi
|
||
exec AT_JOB_FIFO_IN_FD<&-
|
||
wait
|
||
else
|
||
# Run serially, avoid forks and other potential surprises.
|
||
for at_group in $at_groups; do
|
||
at_fn_group_prepare
|
||
if cd "$at_group_dir" &&
|
||
at_fn_test $at_group &&
|
||
. "$at_test_source"; then :; else
|
||
AS_WARN([unable to parse test group: $at_group])
|
||
at_failed=:
|
||
fi
|
||
at_fn_group_postprocess
|
||
test -f "$at_stop_file" && break
|
||
at_first=false
|
||
done
|
||
fi
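
# For illustration only (hypothetical numbers): with --jobs=3 the dispatcher
# above seeds three positional-parameter tokens; each iteration consumes one
# before spawning a group, and when none are left the parent blocks on
#   read at_token <&6
# (descriptor 6 is the fifo's read end) until a finished child writes a
# newline back, so at most three groups run concurrently, while the serial
# branch simply walks the groups in order.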
|
||
|
||
# Wrap up the test suite with summary statistics.
|
||
cd "$at_helper_dir"
|
||
|
||
# Use ?..???? when the list must remain sorted, the faster * otherwise.
|
||
at_pass_list=`for f in */pass; do echo $f; done | sed '/\*/d; s,/pass,,'`
|
||
at_skip_list=`for f in */skip; do echo $f; done | sed '/\*/d; s,/skip,,'`
|
||
at_xfail_list=`for f in */xfail; do echo $f; done | sed '/\*/d; s,/xfail,,'`
|
||
at_xpass_list=`for f in ?/xpass ??/xpass ???/xpass ????/xpass; do
|
||
echo $f; done | sed '/?/d; s,/xpass,,'`
|
||
at_fail_list=`for f in ?/fail ??/fail ???/fail ????/fail; do
|
||
echo $f; done | sed '/?/d; s,/fail,,'`
|
||
|
||
set X $at_pass_list $at_xpass_list $at_xfail_list $at_fail_list $at_skip_list
|
||
shift; at_group_count=$[@%:@]
|
||
set X $at_xpass_list; shift; at_xpass_count=$[@%:@]; at_xpass_list=$[*]
|
||
set X $at_xfail_list; shift; at_xfail_count=$[@%:@]
|
||
set X $at_fail_list; shift; at_fail_count=$[@%:@]; at_fail_list=$[*]
|
||
set X $at_skip_list; shift; at_skip_count=$[@%:@]
|
||
|
||
AS_VAR_ARITH([at_run_count], [$at_group_count - $at_skip_count])
|
||
AS_VAR_ARITH([at_unexpected_count], [$at_xpass_count + $at_fail_count])
|
||
AS_VAR_ARITH([at_total_fail_count], [$at_xfail_count + $at_fail_count])
|
||
|
||
# Back to the top directory.
|
||
cd "$at_dir"
|
||
rm -rf "$at_helper_dir"
|
||
|
||
# Compute the duration of the suite.
|
||
at_stop_date=`date`
|
||
at_stop_time=`date +%s 2>/dev/null`
|
||
AS_ECHO(["$as_me: ending at: $at_stop_date"]) >&AS_MESSAGE_LOG_FD
|
||
case $at_start_time,$at_stop_time in
|
||
[[0-9]*,[0-9]*])
|
||
AS_VAR_ARITH([at_duration_s], [$at_stop_time - $at_start_time])
|
||
AS_VAR_ARITH([at_duration_m], [$at_duration_s / 60])
|
||
AS_VAR_ARITH([at_duration_h], [$at_duration_m / 60])
|
||
AS_VAR_ARITH([at_duration_s], [$at_duration_s % 60])
|
||
AS_VAR_ARITH([at_duration_m], [$at_duration_m % 60])
|
||
at_duration="${at_duration_h}h ${at_duration_m}m ${at_duration_s}s"
|
||
AS_ECHO(["$as_me: test suite duration: $at_duration"]) >&AS_MESSAGE_LOG_FD
|
||
;;
|
||
esac
|
||
|
||
echo
|
||
AS_BOX([Test results.])
|
||
echo
|
||
{
|
||
echo
|
||
AS_BOX([Test results.])
|
||
echo
|
||
} >&AS_MESSAGE_LOG_FD
|
||
|
||
dnl
|
||
dnl FIXME: this code is as far from i18n-cleanness as one
dnl could imagine...
dnl
|
||
if test $at_run_count = 1; then
|
||
at_result="1 test"
|
||
at_were=was
|
||
else
|
||
at_result="$at_run_count tests"
|
||
at_were=were
|
||
fi
|
||
if $at_errexit_p && test $at_unexpected_count != 0; then
|
||
if test $at_xpass_count = 1; then
|
||
at_result="$at_result $at_were run, one passed"
|
||
else
|
||
at_result="$at_result $at_were run, one failed"
|
||
fi
|
||
at_result="$at_result unexpectedly and inhibited subsequent tests."
|
||
at_color=$at_red
|
||
else
|
||
# Don't you just love exponential explosion of the number of cases?
|
||
at_color=$at_red
|
||
case $at_xpass_count:$at_fail_count:$at_xfail_count in
|
||
# So far, so good.
|
||
0:0:0) at_result="$at_result $at_were successful." at_color=$at_grn ;;
|
||
0:0:*) at_result="$at_result behaved as expected." at_color=$at_lgn ;;
|
||
|
||
# Some unexpected failures
|
||
0:*:0) at_result="$at_result $at_were run,
|
||
$at_fail_count failed unexpectedly." ;;
|
||
|
||
# Some failures, both expected and unexpected
|
||
0:*:1) at_result="$at_result $at_were run,
|
||
$at_total_fail_count failed ($at_xfail_count expected failure)." ;;
|
||
0:*:*) at_result="$at_result $at_were run,
|
||
$at_total_fail_count failed ($at_xfail_count expected failures)." ;;
|
||
|
||
# No unexpected failures, but some xpasses
|
||
*:0:*) at_result="$at_result $at_were run,
|
||
$at_xpass_count passed unexpectedly." ;;
|
||
|
||
# No expected failures, but failures and xpasses
|
||
*:1:0) at_result="$at_result $at_were run,
|
||
$at_unexpected_count did not behave as expected dnl
|
||
($at_fail_count unexpected failure)." ;;
|
||
*:*:0) at_result="$at_result $at_were run,
|
||
$at_unexpected_count did not behave as expected dnl
|
||
($at_fail_count unexpected failures)." ;;
|
||
|
||
# All of them.
|
||
*:*:1) at_result="$at_result $at_were run,
|
||
$at_xpass_count passed unexpectedly,
|
||
$at_total_fail_count failed ($at_xfail_count expected failure)." ;;
|
||
*:*:*) at_result="$at_result $at_were run,
|
||
$at_xpass_count passed unexpectedly,
|
||
$at_total_fail_count failed ($at_xfail_count expected failures)." ;;
|
||
esac
|
||
|
||
if test $at_skip_count = 0 && test $at_run_count -gt 1; then
|
||
at_result="All $at_result"
|
||
fi
|
||
fi

# Now put skips in the mix.
case $at_skip_count in
  0) ;;
  1) at_result="$at_result
1 test was skipped." ;;
  *) at_result="$at_result
$at_skip_count tests were skipped." ;;
esac

if test $at_unexpected_count = 0; then
  echo "$at_color$at_result$at_std"
  echo "$at_result" >&AS_MESSAGE_LOG_FD
else
  echo "${at_color}ERROR: $at_result$at_std" >&2
  echo "ERROR: $at_result" >&AS_MESSAGE_LOG_FD
  {
    echo
    AS_BOX([Summary of the failures.])

    # Summary of failed and skipped tests.
    if test $at_fail_count != 0; then
      echo "Failed tests:"
      $SHELL "$at_myself" $at_fail_list --list
      echo
    fi
    if test $at_skip_count != 0; then
      echo "Skipped tests:"
      $SHELL "$at_myself" $at_skip_list --list
      echo
    fi
    if test $at_xpass_count != 0; then
      echo "Unexpected passes:"
      $SHELL "$at_myself" $at_xpass_list --list
      echo
    fi
    if test $at_fail_count != 0; then
      AS_BOX([Detailed failed tests.])
      echo
      for at_group in $at_fail_list
      do
        at_group_normalized=$at_group
        _AT_NORMALIZE_TEST_GROUP_NUMBER(at_group_normalized)
        cat "$at_suite_dir/$at_group_normalized/$as_me.log"
        echo
      done
      echo
    fi
    if test -n "$at_top_srcdir"; then
      AS_BOX([${at_top_build_prefix}config.log])
      sed 's/^/| /' ${at_top_build_prefix}config.log
      echo
    fi
  } >&AS_MESSAGE_LOG_FD

  AS_BOX([$as_me.log was created.])

  echo
  if $at_debug_p; then
    at_msg='per-test log files'
  else
    at_msg="\`${at_testdir+${at_testdir}/}$as_me.log'"
  fi
  AS_ECHO(["Please send $at_msg and all information you think might help:

   To: <AT_PACKAGE_BUGREPORT>
   Subject: @<:@AT_PACKAGE_STRING@:>@ $as_me: dnl
$at_fail_list${at_fail_list:+ failed${at_xpass_list:+, }}dnl
$at_xpass_list${at_xpass_list:+ passed unexpectedly}

You may investigate any problem if you feel able to do so, in which
case the test suite provides a good starting point.  Its output may
be found below \`${at_testdir+${at_testdir}/}$as_me.dir'.
"])
  exit 1
fi

exit 0

m4_text_box([Actual tests.])
m4_divert_pop([TESTS])dnl
dnl End of AT_INIT: divert to KILL, only test groups are to be
dnl output, the rest is ignored.  Current diversion is BODY, inherited
dnl from M4sh.
m4_divert([KILL])
])# AT_INIT


# _AT_ARG_OPTION(OPTIONS,HELP-TEXT,[ARGS],[ACTION-IF-GIVEN],
#                [ACTION-IF-NOT-GIVEN])
# ----------------------------------------------------------
# Internal implementation of AT_ARG_OPTION & AT_ARG_OPTION_ARG
m4_defun([_AT_ARG_OPTION],
[m4_divert_once([HELP_OTHER],
[cat <<_ATEOF || at_write_fail=1

Other options:
_ATEOF
])dnl m4_divert_once HELP_OTHER
m4_divert_text([HELP_OTHER],
[cat <<_ATEOF || at_write_fail=1
$2
_ATEOF])dnl
dnl Turn our options into our desired strings
m4_ifdef([AT_first_option],[m4_undefine([AT_first_option])])dnl
m4_ifdef([AT_case],[m4_undefine([AT_case])])dnl
m4_ifdef([AT_case_no],[m4_undefine([AT_case_no])])dnl
m4_ifdef([AT_case_arg],[m4_undefine([AT_case_arg])])dnl
m4_foreach([AT_option], m4_split(m4_normalize([$1]),[[ \|]+]),
[m4_define_default([AT_first_option],AT_option)dnl
m4_define_default([AT_first_option_tr],
[m4_bpatsubst(m4_defn([AT_first_option]), -, [_])])dnl
m4_append([AT_case],m4_if(m4_len(AT_option),1,[],[-])[-]AT_option, [ | ])dnl
m4_append([AT_case_no],[--no-]AT_option, [ | ])dnl
m4_append([AT_case_arg],
m4_if(m4_len(AT_option),1,[],[-])[-]AT_option[=*], [ | ])dnl
])dnl m4_foreach AT_option
dnl keep track so we or the user may process ACTION-IF-NOT-GIVEN
m4_divert_once([PARSE_ARGS_BEGIN],
[
##
## Set up package specific options.
##
])dnl
m4_divert_text([PARSE_ARGS_BEGIN],
[dnl Provide a default value for options without arguments.
m4_ifvaln([$3],,[at_arg_[]AT_first_option_tr=false])dnl
at_arg_given_[]AT_first_option_tr=false
])dnl m4_divert_text DEFAULTS
m4_divert_text([PARSE_ARGS],
[dnl Parse the options and args when necessary.
m4_ifvaln([$3],
[    AT_case )
        at_prev=--AT_first_option_tr
        ;;
    AT_case_arg )
        at_arg_[]AT_first_option_tr=$at_optarg
        at_arg_given_[]AT_first_option_tr=:
        $4
        ;;],
[    AT_case )
        at_optarg=:
        at_arg_[]AT_first_option_tr=:
        at_arg_given_[]AT_first_option_tr=:
        m4_ifval([$4],[$4])[]dnl
        ;;
    AT_case_no )
        at_optarg=false
        at_arg_[]AT_first_option_tr=false
        at_arg_given_[]AT_first_option_tr=:
        m4_ifval([$4],[$4])[]dnl
        ;;])dnl m4_ifvaln $3
])dnl m4_divert_text PARSE_ARGS
m4_ifvaln([$5],
[m4_divert_once([PARSE_ARGS_END],
[
##
## Process package specific options when _not_ supplied.
##])dnl m4_divert_once PARSE_ARGS_END
m4_divert_text([PARSE_ARGS_END],
[
AS_IF([$at_arg_given_[]AT_first_option_tr],,[$5])dnl
])dnl m4_divert_text PARSE_ARGS_END
])dnl m4_ifvaln $5
])dnl _AT_ARG_OPTION


# AT_ARG_OPTION(OPTIONS,HELP-TEXT,[ACTION-IF-GIVEN],[ACTION-IF-NOT-GIVEN])
# ------------------------------------------------------------------------
# Accept a list of space-separated OPTIONS, all aliases of the first one.
# Add HELP-TEXT to the HELP_OTHER diversion.
#
# Leading dashes should not be passed in OPTIONS.  Users will be required
# to pass `--' before long options and `-' before single character options.
#
# $at_arg_OPTION will be set to `:' if this option is received, `false' if
# --no-OPTION is received, and `false' by default.
#
# Run ACTION-IF-GIVEN each time an option in OPTIONS is encountered; here,
# $at_optarg will be set to `:' or `false' as appropriate.  $at_optarg is
# actually just a copy of $at_arg_OPTION.
#
# ACTION-IF-NOT-GIVEN will be run once after option parsing is complete and
# if no option from OPTIONS was used.
m4_defun([AT_ARG_OPTION],[_AT_ARG_OPTION([$1],[$2],,[$3],[$4])])
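
# For instance (option and variable names below are purely illustrative,
# not options provided by Autotest itself), a test suite that wants a
# boolean `--valgrind' switch could put in its testsuite.at:
#
#    AT_ARG_OPTION([valgrind],
#      [AS_HELP_STRING([--valgrind], [run tests under valgrind])])
#
# and then consult `$at_arg_valgrind' (`:' or `false') in its test groups.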


# AT_ARG_OPTION_ARG(OPTIONS,HELP-TEXT,[ACTION-IF-GIVEN],[ACTION-IF-NOT-GIVEN])
# ----------------------------------------------------------------------------
# Accept a set of space-separated OPTIONS with arguments, all aliases of the
# first one.  Add HELP-TEXT to the HELP_OTHER diversion.
#
# Leading dashes should not be passed in OPTIONS.  Users will be required
# to pass `--' before long options and `-' before single character options.
#
# By default, any argument to these options will be assigned to the shell
# variable $at_arg_OPTION, where OPTION is the first option in OPTIONS with
# any `-' characters replaced with `_'.
#
# Run ACTION-IF-GIVEN each time an option in OPTIONS is encountered; here,
# $at_optarg will be set to the option argument.  $at_optarg is actually just
# a copy of $at_arg_OPTION.
#
# ACTION-IF-NOT-GIVEN will be run once after option parsing is complete
# and if no option from OPTIONS was used.
m4_defun([AT_ARG_OPTION_ARG],[_AT_ARG_OPTION([$1],[$2],1,[$3],[$4])])
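
# For instance (again an illustrative option, not one defined by Autotest),
# an option taking an argument could be declared as:
#
#    AT_ARG_OPTION_ARG([time-limit],
#      [AS_HELP_STRING([--time-limit=SECONDS], [stop a group after SECONDS])])
#
# after which any value passed on the command line is available to the test
# groups as `$at_arg_time_limit'.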


# AT_TESTED(PROGRAMS)
# -------------------
# Specify the list of programs exercised by the test suite.  Their
# versions are logged, and in the case of an embedded test suite, they
# must correspond to the version of the package.  PATH should be
# already preset so the proper executable will be selected.
m4_define([AT_TESTED],
[m4_append_uniq_w([AT_tested], [$1])])
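
# For instance, a package shipping the programs `foo' and `bar' (illustrative
# names) would write:
#
#    AT_TESTED([foo bar])
#
# so that the version of each program is logged before the test groups run.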


# AT_COPYRIGHT(TEXT, [FILTER = m4_newline])
# -----------------------------------------
# Emit TEXT, a copyright notice, at the top of the test suite and in
# --version output.  Macros in TEXT are evaluated once.  Process
# the --version output through FILTER (m4_newline, m4_do, and
# m4_copyright_condense are common filters).
m4_define([AT_COPYRIGHT],
[AS_COPYRIGHT([$1])[]]dnl
[m4_divert_text([VERSION_NOTICES],
[m4_default([$2], [m4_newline])([$1])])])# AT_COPYRIGHT
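
# A typical call, with an illustrative notice, is:
#
#    AT_COPYRIGHT([Copyright (C) 2012 Example Project Contributors.])
#
# which emits the notice both in the generated script's header and in the
# output of `./testsuite --version'.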


# AT_COLOR_TESTS
# --------------
# Enable colored test results if standard error is connected to a terminal.
m4_define([AT_COLOR_TESTS],
[m4_define([AT_color], [auto])])

# AT_SETUP(DESCRIPTION)
# ---------------------
# Start a group of related tests, all to be executed in the same subshell.
# The group is testing what DESCRIPTION says.
_AT_DEFINE_INIT([AT_SETUP],
[m4_ifdef([AT_ingroup], [m4_fatal([$0: nested AT_SETUP detected])],
[m4_define([AT_ingroup], [AS_ECHO(["$at_setup_line"]) >"$at_check_line_file"
])])
m4_ifdef([AT_keywords], [m4_undefine([AT_keywords])])
m4_define([AT_capture_files], [])
m4_define([AT_line], AT_LINE)
m4_define([AT_xfail], [at_xfail=no])
m4_define([AT_description], m4_expand([$1]))
m4_define([AT_ordinal], m4_incr(AT_ordinal))
m4_divert_push([TEST_GROUPS])dnl
[#AT_START_]AT_ordinal
at_fn_group_banner AT_ordinal 'm4_defn([AT_line])' \
  "AS_ESCAPE(m4_dquote(m4_defn([AT_description])))" m4_format(["%*s"],
  m4_max(0, m4_eval(47 - m4_qlen(m4_defn([AT_description])))), [])m4_if(
  AT_banner_ordinal, [0], [], [ AT_banner_ordinal])
m4_divert_push([TEST_SCRIPT])dnl
])


# AT_FAIL_IF(SHELL-EXPRESSION)
# ----------------------------
# Make the test die with hard failure if SHELL-EXPRESSION evaluates to
# true (exitcode = 0).
_AT_DEFINE_SETUP([AT_FAIL_IF],
[dnl
dnl Try to limit the amount of conditionals that we emit.
m4_case([$1],
  [], [],
  [false], [],
  [:], [_AT_CHECK_EXIT([], [99])],
  [true], [_AT_CHECK_EXIT([], [99])],
  [_AT_CHECK_EXIT([$1], [99])])])
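
# For instance, to abort a group whose fixture is missing (illustrative
# file name):
#
#    AT_FAIL_IF([test ! -f fixture.db])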


# AT_SKIP_IF(SHELL-EXPRESSION)
# ----------------------------
# Skip the rest of the group if SHELL-EXPRESSION evaluates to true
# (exitcode = 0).
_AT_DEFINE_SETUP([AT_SKIP_IF],
[dnl
dnl Try to limit the amount of conditionals that we emit.
m4_case([$1],
  [], [],
  [false], [],
  [:], [_AT_CHECK_EXIT([], [77])],
  [true], [_AT_CHECK_EXIT([], [77])],
  [_AT_CHECK_EXIT([$1], [77])])])
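
# For instance, to skip the remainder of a group when an optional interpreter
# is unavailable (assuming atlocal defines $PERL; purely illustrative):
#
#    AT_SKIP_IF([test -z "$PERL"])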


# AT_XFAIL_IF(SHELL-EXPRESSION)
# -----------------------------
# Set up the test to be expected to fail if SHELL-EXPRESSION evaluates to
# true (exitcode = 0).
_AT_DEFINE_SETUP([AT_XFAIL_IF],
[dnl
dnl Try to limit the amount of conditionals that we emit.
m4_case([$1],
  [], [],
  [false], [],
  [:], [m4_define([AT_xfail], [at_xfail=yes])],
  [true], [m4_define([AT_xfail], [at_xfail=yes])],
  [m4_append([AT_xfail], [
$1 && at_xfail=yes])])])
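
# For instance, to mark a group as an expected failure on one platform
# (assuming atlocal exports $host_os; purely illustrative):
#
#    AT_XFAIL_IF([test "$host_os" = cygwin])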


# AT_KEYWORDS(KEYWORDS)
# ---------------------
# Declare a list of keywords associated with the current test group.
# Since the -k option is case-insensitive, the list is stored in lower case
# to avoid duplicates that differ only by case.
_AT_DEFINE_SETUP([AT_KEYWORDS],
[m4_append_uniq_w([AT_keywords], m4_tolower(_m4_expand([$1
])))])
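
# For instance (keywords are illustrative):
#
#    AT_KEYWORDS([parser regression])
#
# lets `./testsuite -k parser' select this group.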


# AT_CAPTURE_FILE(FILE)
# ---------------------
# If the current test group does not behave as expected, save the contents of
# FILE in the test suite log.
_AT_DEFINE_SETUP([AT_CAPTURE_FILE],
[m4_append_uniq([AT_capture_files], ["$1"], [ \
])])
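
# For instance, to include a generated log in the suite log whenever the
# group fails unexpectedly (illustrative file name):
#
#    AT_CAPTURE_FILE([sub/config.log])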


# AT_CLEANUP
# ----------
# Complete a group of related tests.
_AT_DEFINE_INIT([AT_CLEANUP],
[m4_ifdef([AT_ingroup], [AT_ingroup[]_m4_undefine([AT_ingroup])],
[m4_fatal([$0: missing AT_SETUP detected])])dnl
m4_append([AT_help_all],
m4_defn([AT_ordinal]);m4_defn([AT_line]);m4_defn([AT_description]);dnl
m4_ifdef([AT_keywords], [m4_defn([AT_keywords])]);
)dnl
m4_divert_pop([TEST_SCRIPT])dnl Back to TEST_GROUPS
AT_xfail
(
  AS_ECHO(["AT_ordinal. $at_setup_line: testing $at_desc ..."])
  $at_traceon
m4_undivert([TEST_SCRIPT])dnl Insert the code here
  set +x
  $at_times_p && times >"$at_times_file"
) AS_MESSAGE_LOG_FD>&1 2>&1 AT_JOB_FIFO_OUT_FD>&- | eval $at_tee_pipe
read at_status <"$at_status_file"
[#AT_STOP_]AT_ordinal
m4_divert_pop([TEST_GROUPS])dnl Back to KILL.
])# AT_CLEANUP
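
# Putting the group macros together, a minimal test group reads (description,
# keywords, and command are illustrative only):
#
#    AT_SETUP([addition])
#    AT_KEYWORDS([arith])
#    AT_CHECK([test `expr 1 + 1` -eq 2])
#    AT_CLEANUP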


# AT_BANNER([TEXT])
# -----------------
# Start a category of related test groups.  If multiple groups are executed,
# output TEXT as a banner without any shell expansion, prior to any test
# from the category.  If TEXT is empty, no banner is printed.
_AT_DEFINE_INIT([AT_BANNER],
[m4_ifdef([AT_ingroup], [m4_fatal([$0: nested AT_SETUP detected])])dnl
m4_define([AT_banner_ordinal], m4_incr(AT_banner_ordinal))
m4_divert_text([BANNERS],
[@%:@ Banner AT_banner_ordinal. AT_LINE
@%:@ Category starts at test group m4_incr(AT_ordinal).
at_banner_text_[]AT_banner_ordinal="AS_ESCAPE([$1])"])dnl
])# AT_BANNER
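
# For instance (illustrative wording):
#
#    AT_BANNER([Arithmetic primitives.])
#
# prints that heading before the first group of the category, provided more
# than one group is executed.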


# AT_DATA(FILE, CONTENTS)
# -----------------------
# Initialize an input data FILE with given CONTENTS, which should be
# empty or end with a newline.
# This macro is not robust to active symbols in CONTENTS *on purpose*.
# If you don't want CONTENTS to be evaluated, quote it twice.
_AT_DEFINE_SETUP([AT_DATA],
[m4_if([$2], [], [: >$1],
       [$2], [[]], [: >$1],
       [cat >$1 <<'_ATEOF'
$2[]_ATEOF
])])
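
# For instance, to create a two-line input file (illustrative contents; note
# the double quoting and the trailing newline):
#
#    AT_DATA([input.txt],
#    [[first line
#    second line
#    ]])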


# AT_CHECK(COMMANDS, [STATUS = 0], STDOUT, STDERR,
#          [RUN-IF-FAIL], [RUN-IF-PASS])
# ------------------------------------------------
# Execute a test by performing given shell COMMANDS.  These commands
# should normally exit with STATUS, while producing expected STDOUT and
# STDERR contents.  Shell metacharacters in STDOUT and STDERR are
# _not_ processed by the shell, but are treated as string literals.
#
# STATUS, STDOUT, and STDERR are not checked if equal to `ignore'.
#
# If STDOUT is `expout', then stdout is compared to the content of the file
# `expout'.  Likewise for STDERR and `experr'.
#
# If STDOUT is `stdout', then stdout is left in the file `stdout',
# likewise for STDERR and `stderr'.  Don't do this:
#
#    AT_CHECK([command >out])
#    # Some checks on `out'
#
# do this instead:
#
#    AT_CHECK([command], [], [stdout])
#    # Some checks on `stdout'
#
# You might wonder why you can't just use `ignore', then directly use stdout
# and stderr left by the test suite:
#
#    AT_CHECK([command], [], [ignore])
#    AT_CHECK([check stdout])
#
# If the test suite always captured data in the file `stdout', then the
# second command would be trying to read and write from the same file, with
# undefined behavior.  Therefore, the test suite actually captures data in
# an internal file of a different name, and only creates `stdout' when
# explicitly requested.
#
# Any line of stderr starting with leading blanks and a `+' is filtered
# out, since most shells when tracing include subshell traces in stderr.
# This may cause spurious failures when the test suite is run with `-x'.
#
_AT_DEFINE_SETUP([AT_CHECK],
[_AT_CHECK(m4_expand([$1]), [$2], AS_ESCAPE(m4_dquote(m4_expand([$3]))),
  AS_ESCAPE(m4_dquote(m4_expand([$4]))), [$5], [$6])])
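
# For instance, a check that expects `grep' to exit with status 1 and does
# not care about its output (command and file name are illustrative):
#
#    AT_CHECK([grep forbidden input.txt], [1], [ignore], [ignore])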

# AT_CHECK_UNQUOTED(COMMANDS, [STATUS = 0], STDOUT, STDERR,
#                   [RUN-IF-FAIL], [RUN-IF-PASS])
# ---------------------------------------------------------
# Like AT_CHECK, but do not AS_ESCAPE shell metacharacters in the STDOUT
# and STDERR arguments before running the comparison.
_AT_DEFINE_SETUP([AT_CHECK_UNQUOTED],
[_AT_CHECK(m4_expand([$1]), [$2], AS_ESCAPE(m4_dquote(m4_expand([$3])), [""]),
  AS_ESCAPE(m4_dquote(m4_expand([$4])), [""]), [$5], [$6])])
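
# This is useful when the expected output must reference a shell variable,
# for instance (illustrative):
#
#    AT_CHECK_UNQUOTED([echo "$HOME"], [0], [$HOME
#    ])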

# AT_CHECK_NOESCAPE(COMMANDS, [STATUS = 0], STDOUT, STDERR,
#                   [RUN-IF-FAIL], [RUN-IF-PASS])
# ---------------------------------------------------------
# Obsolete spelling of AT_CHECK_UNQUOTED.
m4_define([AT_CHECK_NOESCAPE],
[m4_warn([obsolete], [consider using AT_CHECK_UNQUOTED instead of $0])]dnl
[_AT_CHECK(m4_expand([$1]), [$2], m4_expand([$3]),
  m4_expand([$4]), [$5], [$6])])


# _AT_DECIDE_TRACEABLE(COMMANDS)
# ------------------------------
# Worker for _AT_CHECK that expands to shell code.  If COMMANDS are safe to
# trace with `set -x', the shell code will evaluate to true.  Otherwise,
# the shell code will print a message stating an aspect of COMMANDS that makes
# tracing them unsafe, and evaluate to false.
#
# Tracing COMMANDS is not safe if they contain a command that spans multiple
# lines.  When the test suite user passes `-x' or `--trace', the test suite
# precedes every command with a `set -x'.  Since most tests expect a specific
# stderr, if only to confirm that it is empty, the test suite filters ^+ from
# the captured stderr before comparing with the expected stderr.  If a command
# spans multiple lines, so will its trace, but a `+' only prefixes the first
# line of that trace:
#
#     $ echo 'foo
#     bar'
#     => stdout
#     foo
#     bar
#     => stderr
#     + foo
#     bar
#
# In a subset of cases, one could filter such extended shell traces from
# stderr.  Since test commands spanning several lines are rare, I chose
# instead to simply not trace COMMANDS that could yield multiple trace lines.
# Distinguishing such COMMANDS became the task at hand.
#
# These features may cause a shell command to span multiple lines:
#
# (a) A quoted literal newline.
#     Example:
#       echo foo'
#       'bar
#     M4 is a hostile language for the job of parsing COMMANDS to determine
#     whether each literal newline is quoted, so we simply disable tracing
#     for all COMMANDS that bear literal newlines.
#
# (b) A command substitution not subject to word splitting.
#     Example:
#       var=$(printf 'foo\nbar')
#     Example:
#       echo "`printf 'foo\\nbar'`"
#     One cannot know in general the number of lines a command substitution
#     will yield without executing the substituted command.  As such, we
#     disable tracing for all COMMANDS containing these constructs.
#
# (c) A parameter expansion not subject to word splitting.
#     Example:
#       var=foo'
#       'bar
#       echo "$var"
#     Parameter expansions appear in COMMANDS with much greater frequency
#     than do newlines and command substitutions, so disabling tracing for
#     all such COMMANDS would much more substantially devalue `testsuite -x'.
#     To determine which parameter expansions yield multiple lines, we escape
#     all ``', `"', and `\' in a copy of COMMANDS and expand that string
#     within double quotes at runtime.  If the result of that expansion
#     contains multiple lines, the test suite disables tracing for the
#     command in question.
#
# This method leads the test suite to expand some parameters that the shell
# itself will never expand due to single-quotes or backslash escapes.  This is
# not a problem for `$foo' expansions, which will simply yield the empty string
# or some unrelated value.  A `${...}' expansion could actually form invalid
# shell code, however; consider `${=foo}'.  Therefore, we disable tracing for
# all COMMANDS containing `${...}'.  This affects few COMMANDS.
#
# This macro falls in a very hot path; the Autoconf test suite expands it 1640
# times as of this writing.  To give a sense of the impact of the heuristics I
# just described, the test suite preemptively disables tracing for 31 of those,
# and 268 contain parameter expansions that require runtime evaluation.  The
# balance are always safe to trace.
m4_define([_AT_DECIDE_TRACEABLE],
dnl Utility macro.
dnl
dnl Examine COMMANDS for a reason to never trace COMMANDS.
[m4_pushdef([at_reason],
m4_cond([m4_eval(m4_index([$1], [`]) >= 0)], [1],
          [[a `...` command substitution]],
        [m4_eval(m4_index([$1], [$(]) >= 0)], [1],
          [[a $(...) command substitution]],
        [m4_eval(m4_index([$1], [${]) >= 0)], [1],
          [[a ${...} parameter expansion]],
        [m4_eval(m4_index([$1], m4_newline) >= 0)], [1],
          [[an embedded newline]],
        [m4_eval(m4_bregexp([$1], [[^|]|[^|]]) >= 0)], [1],
          [[a shell pipeline]],
        []))]dnl No reason.
[m4_if(m4_index(_m4_defn([at_reason]), [a]), [0],]dnl
dnl We know at build time that tracing COMMANDS is never safe.
[[at_fn_check_prepare_notrace '_m4_defn([at_reason])'],
m4_index([$1], [$]), [-1],]dnl
dnl We know at build time that tracing COMMANDS is always safe.
[[at_fn_check_prepare_trace],]dnl
dnl COMMANDS may contain parameter expansions; expand them at runtime.
[[at_fn_check_prepare_dynamic "AS_ESCAPE([[$1]], [`\"])"])[]]dnl
[_m4_popdef([at_reason])])


# AT_DIFF_STDERR/AT_DIFF_STDOUT
# -----------------------------
# These are subroutines of AT_CHECK.  Using indirect dispatch is a tad
# faster than using m4_case, and these are called very frequently.
m4_define([AT_DIFF_STDERR(stderr)],
[echo stderr:; tee stderr <"$at_stderr"])
m4_define([AT_DIFF_STDERR(stderr-nolog)],
[echo stderr captured; cp "$at_stderr" stderr])
m4_define([AT_DIFF_STDERR(ignore)],
[echo stderr:; cat "$at_stderr"])
m4_define([AT_DIFF_STDERR(ignore-nolog)])
m4_define([AT_DIFF_STDERR(experr)],
[$at_diff experr "$at_stderr" || at_failed=:])
m4_define([AT_DIFF_STDERR()],
[at_fn_diff_devnull "$at_stderr" || at_failed=:])

m4_define([AT_DIFF_STDOUT(stdout)],
[echo stdout:; tee stdout <"$at_stdout"])
m4_define([AT_DIFF_STDOUT(stdout-nolog)],
[echo stdout captured; cp "$at_stdout" stdout])
m4_define([AT_DIFF_STDOUT(ignore)],
[echo stdout:; cat "$at_stdout"])
m4_define([AT_DIFF_STDOUT(ignore-nolog)])
m4_define([AT_DIFF_STDOUT(expout)],
[$at_diff expout "$at_stdout" || at_failed=:])
m4_define([AT_DIFF_STDOUT()],
[at_fn_diff_devnull "$at_stdout" || at_failed=:])

# _AT_CHECK(COMMANDS, [STATUS = 0], STDOUT, STDERR,
#           [RUN-IF-FAIL], [RUN-IF-PASS])
# -------------------------------------------------
# Worker for AT_CHECK and AT_CHECK_UNQUOTED, with COMMANDS, STDOUT, and
# STDERR pre-expanded.
#
# Implementation Details
# ----------------------
# Ideally, we would like to run
#
#    ( $at_traceon; COMMANDS >at-stdout 2> at-stderr )
#
# but we must group COMMANDS as it is not limited to a single command, and
# then the shells will save the traces in at-stderr.  So we have to filter
# them out when checking stderr, and we must send them into the test suite's
# stderr to honor -x properly.  Since only the first line of the trace of a
# multiline command starts with a `+', and I know of no straightforward way to
# filter out the unadorned trace lines, we disable shell tracing entirely for
# commands that could span multiple lines.
#
# Limiting COMMANDS to a single command is not good either, since then
# the user herself would use {} or (), and then we face the same problem.
#
# But then, there is no point in running
#
#    ( $at_traceon { $1 ; } >at-stdout 2>at-stderr )
#
# instead of the simpler
#
#    ( $at_traceon; $1 ) >at-stdout 2>at-stderr
#
# Note that we truncate and append to the output files, to avoid losing
# output from multiple concurrent processes, e.g., an inner testsuite
# with parallel jobs.
m4_define([_AT_CHECK],
[m4_define([AT_ingroup])]dnl
[{ set +x
AS_ECHO(["$at_srcdir/AT_LINE: AS_ESCAPE([[$1]])"])
_AT_DECIDE_TRACEABLE([$1]) _AT_LINE_ESCAPED
( $at_check_trace; [$1]
) >>"$at_stdout" 2>>"$at_stderr" AS_MESSAGE_LOG_FD>&-
at_status=$? at_failed=false
$at_check_filter
m4_ifdef([AT_DIFF_STDERR($4)], [m4_indir([AT_DIFF_STDERR($4)])],
         [echo >>"$at_stderr"; AS_ECHO([["$4"]]) | \
  $at_diff - "$at_stderr" || at_failed=:])
m4_ifdef([AT_DIFF_STDOUT($3)], [m4_indir([AT_DIFF_STDOUT($3)])],
         [echo >>"$at_stdout"; AS_ECHO([["$3"]]) | \
  $at_diff - "$at_stdout" || at_failed=:])
m4_if([$2], [ignore], [at_fn_check_skip],
      [at_fn_check_status m4_default([$2], [0])]) $at_status "$at_srcdir/AT_LINE"
m4_ifvaln([$5$6], [AS_IF($at_failed, [$5], [$6])])]dnl
[$at_failed && at_fn_log_failure AT_capture_files
$at_traceon; }
])# _AT_CHECK

# _AT_CHECK_EXIT(COMMANDS, [EXIT-STATUS-IF-PASS])
# -----------------------------------------------
# Minimal version of _AT_CHECK for AT_SKIP_IF and AT_FAIL_IF.
m4_define([_AT_CHECK_EXIT],
[m4_define([AT_ingroup])]dnl
[AS_ECHO(_AT_LINE_ESCAPED) >"$at_check_line_file"
m4_ifval([$1], [($1) \
&& ])at_fn_check_skip $2 "$at_srcdir/AT_LINE"])# _AT_CHECK_EXIT