# rigid rules.
# I have found the '## no critic' method for locally deactivating specific
-# policies with comments to be very troublesome to use.
+# policies to be too buggy to use.
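# (For reference, that method tags individual statements in the source with an
#  annotation, for example something like this hypothetical line:
#      $text =~ /$pattern/;    ## no critic (RegularExpressions::ProhibitComplexRegexes)
#  so the policy adjustments below are made once, in this file, instead.)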
# severity = 1 gives the most strict checking.
severity = 1
# There is one complex regex in Tokenizer.pm for scanning numbers. It is
# well commented and easy to read, and any changes would make it harder
-# to read. So we have to skip this.
-[-RegularExpressions::ProhibitComplexRegexes]
+# to read. But rather than deactivate this policy, I have adjusted the
+# maximum number of characters to let this regex pass.
+[RegularExpressions::ProhibitComplexRegexes]
+max_characters=250
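# (max_characters sets this policy's complexity threshold: regexes longer than
#  this number of characters are flagged.)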
# A problem with RequireExtendedFormatting is that it makes things needlessly
# complex when matching things like line feeds and carriage returns. So
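# (For example, a trivial match like the hypothetical
#      next if $line =~ /\r\n/;
#  would need the /x modifier just to satisfy this policy, with no gain in
#  readability.)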
#---------------------------------
# The next token after a ';' and label (type 'J') starts a new stmt
# The ci after a C-style for ';' (type 'f') is handled similarly.
- # TODO: There is type 'f' redundant coding in sub respace which can
- # be removed if this becomes the standard routine for computing ci.
elsif ( $type eq ';' || $type eq 'J' || $type eq 'f' ) {
$ci_next = 0;
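# if the previous token is of a closing type, carry its ci value forward
# to this token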
if ( $is_closing_type{$last_type} ) { $ci_this = $ci_last }