diff -Nru asciidoctor-1.5.5/.appveyor.yml asciidoctor-2.0.10/.appveyor.yml --- asciidoctor-1.5.5/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/.appveyor.yml 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,27 @@ +# AppVeyor automatically skips the build if the commit contains [ci skip] or [skip ci] +version: '{build}' +skip_tags: true +clone_depth: 2 +environment: + matrix: + # see https://www.appveyor.com/docs/linux-images-software/#ruby for list of installed Ruby runtimes + - ruby_version: '26' + - ruby_version: '26-x64' + - ruby_version: '25' + - ruby_version: '24' + - ruby_version: '23' +init: +# Prepend Ruby path from matrix while removing original Ruby entry (C:\Ruby193\bin) +- set PATH=C:\Ruby%ruby_version%\bin;%PATH:C:\Ruby193\bin;=% +- echo %PATH% +- ruby -v +- gem -v +- bundler --version +install: +- bundle --jobs=3 --path=.bundle\gems +build_script: +- bundle exec rake build +test_script: +- bundle exec rake test:all +#artifacts: +#- path: pkg\*.gem diff -Nru asciidoctor-1.5.5/appveyor.yml asciidoctor-2.0.10/appveyor.yml --- asciidoctor-1.5.5/appveyor.yml 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -version: '{build}' -skip_tags: true -# AppVeyor automatically skips the build if the commit contains [ci skip] or [skip ci] -#skip_commits: -# message: /\[ci skip\]/ -clone_depth: 10 -environment: - matrix: - # there's a problem loading nokogiri 1.5.11 in Ruby 2.x on Windows - #- ruby_version: '21' - #- ruby_version: '21-x64' - #- ruby_version: '200' - #- ruby_version: '200-x64' - - ruby_version: '193' -install: - # Take default Ruby out of path - - SET PATH=%PATH:C:\Ruby193\bin;=% - # Add Ruby to path from build matrix - - SET PATH=C:\Ruby%ruby_version%\bin;%PATH% - - echo %PATH% - - ruby --version - - gem --version - - gem install bundler --quiet --no-ri --no-rdoc - - bundler --version - - bundle -build_script: - - bundle exec rake build -test_script: - - bundle exec rake test:all -artifacts: - - path: pkg\*.gem diff -Nru asciidoctor-1.5.5/asciidoctor.gemspec asciidoctor-2.0.10/asciidoctor.gemspec --- asciidoctor-1.5.5/asciidoctor.gemspec 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/asciidoctor.gemspec 2019-08-18 16:11:54.000000000 +0000 @@ -1,49 +1,54 @@ -# -*- encoding: utf-8 -*- -require File.expand_path '../lib/asciidoctor/version', __FILE__ -require 'open3' unless defined? Open3 +begin + require_relative 'lib/asciidoctor/version' +rescue LoadError + require 'asciidoctor/version' +end Gem::Specification.new do |s| s.name = 'asciidoctor' s.version = Asciidoctor::VERSION - s.summary = 'An implementation of the AsciiDoc text processor and publishing toolchain in Ruby' - s.description = 'A fast, open source text processor and publishing toolchain, written in Ruby, for converting AsciiDoc content to HTML5, DocBook 5 (or 4.5) and other formats.' + s.summary = 'An implementation of the AsciiDoc text processor and publishing toolchain' + s.description = 'A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats.' 
s.authors = ['Dan Allen', 'Sarah White', 'Ryan Waldron', 'Jason Porter', 'Nick Hengeveld', 'Jeremy McAnally'] s.email = ['dan.j.allen@gmail.com'] - s.homepage = 'http://asciidoctor.org' + s.homepage = 'https://asciidoctor.org' s.license = 'MIT' + # NOTE required ruby version is informational only; it's not enforced since it can't be overridden and can cause builds to break + #s.required_ruby_version = '>= 2.3.0' + s.metadata = { + 'bug_tracker_uri' => 'https://github.com/asciidoctor/asciidoctor/issues', + 'changelog_uri' => 'https://github.com/asciidoctor/asciidoctor/blob/master/CHANGELOG.adoc', + 'mailing_list_uri' => 'http://discuss.asciidoctor.org', + 'source_code_uri' => 'https://github.com/asciidoctor/asciidoctor' + } - files = begin - (result = Open3.popen3('git ls-files -z') {|_, out| out.read }.split %(\0)).empty? ? Dir['**/*'] : result + # NOTE the logic to build the list of files is designed to produce a usable package even when the git command is not available + begin + files = (result = `git ls-files -z`.split ?\0).empty? ? Dir['**/*'] : result rescue - Dir['**/*'] + files = Dir['**/*'] end - s.files = files.grep(/^(?:(?:data|lib|man)\/.+|Gemfile|Rakefile|(?:CHANGELOG|CONTRIBUTING|LICENSE|README(?:-\w+)?)\.adoc|#{s.name}\.gemspec)$/) - s.executables = files.grep(/^bin\//).map {|f| File.basename f } - s.test_files = files.grep(/^(?:test\/.*_test\.rb|features\/.*\.(?:feature|rb))$/) + s.files = files.grep %r/^(?:(?:data|lib|man)\/.+|LICENSE|(?:CHANGELOG|README(?:-\w+)?)\.adoc|\.yardopts|#{s.name}\.gemspec)$/ + s.executables = (files.grep %r/^bin\//).map {|f| File.basename f } s.require_paths = ['lib'] - s.has_rdoc = true - s.rdoc_options = ['--charset=UTF-8'] - s.extra_rdoc_files = ['CHANGELOG.adoc', 'CONTRIBUTING.adoc', 'LICENSE.adoc'] + #s.test_files = files.grep %r/^(?:features|test)\/.+$/ # asciimath is needed for testing AsciiMath in DocBook backend - s.add_development_dependency 'asciimath', '~> 1.0.2' + s.add_development_dependency 'asciimath', '~> 1.0.0' # coderay is needed for testing syntax highlighting s.add_development_dependency 'coderay', '~> 1.1.0' - s.add_development_dependency 'cucumber', '~> 1.3.1' - # erubis is needed for testing use of alternative eRuby impls + # concurrent-ruby, haml, slim, and tilt are needed for testing custom templates + s.add_development_dependency 'concurrent-ruby', '~> 1.1.0' + s.add_development_dependency 'cucumber', '~> 3.1.0' + # erubis is needed for testing alternate eRuby impls s.add_development_dependency 'erubis', '~> 2.7.0' - # haml is needed for testing custom templates - s.add_development_dependency 'haml', '~> 4.0.0' - s.add_development_dependency 'nokogiri', '~> 1.5.10' - s.add_development_dependency 'rake', '~> 10.0.0' - s.add_development_dependency 'rspec-expectations', '~> 2.14.0' - # slim is needed for testing custom templates - s.add_development_dependency 'slim', '~> 2.0.0' - s.add_development_dependency 'thread_safe', '~> 0.3.4' - # tilt is needed for testing custom templates + s.add_development_dependency 'haml', '~> 5.0.0' + s.add_development_dependency 'minitest', '~> 5.11.0' + s.add_development_dependency 'nokogiri', '~> 1.10.0' + s.add_development_dependency 'rake', '~> 12.3.0' + # Asciidoctor supports Rouge >= 2 + s.add_development_dependency 'rouge', '~> 3.3.0' + s.add_development_dependency 'rspec-expectations', '~> 3.8.0' + s.add_development_dependency 'slim', '~> 4.0.0' s.add_development_dependency 'tilt', '~> 2.0.0' - s.add_development_dependency 'yard', '~> 0.8.7' - s.add_development_dependency 
'yard-tomdoc', '~> 0.7.0' - s.add_development_dependency 'minitest', '~> 5.3.0' - s.add_development_dependency 'racc', '~> 1.4.10' if RUBY_VERSION == '2.1.0' && RUBY_ENGINE == 'rbx' end diff -Nru asciidoctor-1.5.5/benchmark/benchmark.rb asciidoctor-2.0.10/benchmark/benchmark.rb --- asciidoctor-1.5.5/benchmark/benchmark.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/benchmark/benchmark.rb 2019-08-18 16:11:54.000000000 +0000 @@ -21,7 +21,8 @@ $ RUBY_GC_MALLOC_LIMIT=90000000 RUBY_FREE_MIN=650000 ruby benchmark.rb userguide-loop 10 .Ruby >= 2.1 - $ RUBY_GC_MALLOC_LIMIT=128000000 RUBY_GC_OLDMALLOC_LIMIT=128000000 RUBY_GC_HEAP_INIT_SLOTS=800000 RUBY_GC_HEAP_FREE_SLOTS=800000 RUBY_GC_HEAP_GROWTH_MAX_SLOTS=250000 RUBY_GC_HEAP_GROWTH_FACTOR=2 ruby benchmark.rb userguide-loop 10 + $ RUBY_GC_MALLOC_LIMIT=128000000 RUBY_GC_OLDMALLOC_LIMIT=128000000 RUBY_GC_HEAP_INIT_SLOTS=10000000 RUBY_GC_HEAP_FREE_SLOTS=10000000 RUBY_GC_HEAP_GROWTH_MAX_SLOTS=250000 RUBY_GC_HEAP_GROWTH_FACTOR=2 ruby benchmark.rb userguide-loop 10 + $ RUBY_GC_MALLOC_LIMIT=128000000 RUBY_GC_OLDMALLOC_LIMIT=128000000 RUBY_GC_HEAP_INIT_SLOTS=20000000 RUBY_GC_HEAP_FREE_SLOTS=1000000 RUBY_GC_HEAP_GROWTH_MAX_SLOTS=250000 RUBY_GC_HEAP_GROWTH_FACTOR=2 ruby benchmark.rb userguide-loop 10 Asciidoctor starts with ~ 12,500 objects, adds ~ 300,000 each run, so tune RUBY_GC_HEAP_* accordingly @@ -78,7 +79,7 @@ result = Benchmark.bmbm {|bm| bm.report(%(Convert #{sample_file} (x#{$repeat}))) { $repeat.times { - Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => {'linkcss' => '', 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => ''} + Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => {'stylesheet' => nil, 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => ''} } } } @@ -94,18 +95,22 @@ backend = ENV['BENCH_BACKEND'] || 'html5' fetch_userguide if sample_file == 'sample-data/userguide.adoc' && !(File.exist? sample_file) - best = nil + timings = [] 2.times.each do - outer_start = Time.now + loop_timings = [] (1..$repeat).each do - inner_start = Time.now - Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => {'linkcss' => '', 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => ''} - puts (elapsed = Time.now - inner_start) - best = (best ? [best, elapsed].min : elapsed) + start = Time.now + Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => { 'stylesheet' => nil, 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => '' } + loop_timings << (Time.now - start) end - puts %(Run Total: #{Time.now - outer_start}) + timings << loop_timings end - puts %(Best Time: #{best}) + best_time = nil + timings.each do |loop_timings| + puts %(#{loop_timings * "\n"}\nRun Total: #{loop_timings.reduce :+}) + best_time = best_time ? 
[best_time, loop_timings.min].min : loop_timings.min + end + puts %(Best Time: #{best_time}) when 'mdbasics-loop' require '../lib/asciidoctor.rb' @@ -113,17 +118,21 @@ sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/mdbasics.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' - best = nil + timings = [] 2.times do - outer_start = Time.now + loop_timings = [] (1..$repeat).each do - inner_start = Time.now - Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :header_footer => false, :to_file => false, :attributes => {'linkcss' => '', 'idprefix' => '', 'idseparator' => '-', 'showtitle' => ''} - puts (elapsed = Time.now - inner_start) - best = (best ? [best, elapsed].min : elapsed) + start = Time.now + Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :header_footer => false, :to_file => false, :attributes => { 'stylesheet' => nil, 'idprefix' => '', 'idseparator' => '-', 'showtitle' => '' } + loop_timings << (Time.now - start) end - puts %(Run Total: #{Time.now - outer_start}) + timings << loop_timings + end + best_time = nil + timings.each do |loop_timings| + puts %(#{loop_timings * "\n"}\nRun Total: #{loop_timings.reduce :+}) + best_time = best_time ? [best_time, loop_timings.min].min : loop_timings.min end - puts %(Best Time: #{best}) + puts %(Best Time: #{best_time}) end diff -Nru asciidoctor-1.5.5/benchmark/.ruby-version asciidoctor-2.0.10/benchmark/.ruby-version --- asciidoctor-1.5.5/benchmark/.ruby-version 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/benchmark/.ruby-version 2019-08-18 16:11:54.000000000 +0000 @@ -1 +1 @@ -2.1 +2.6 diff -Nru asciidoctor-1.5.5/bin/asciidoctor asciidoctor-2.0.10/bin/asciidoctor --- asciidoctor-1.5.5/bin/asciidoctor 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/bin/asciidoctor 2019-08-18 16:11:54.000000000 +0000 @@ -1,13 +1,14 @@ #!/usr/bin/env ruby +# frozen_string_literal: true -require 'rubygems' unless defined? Gem - -if File.exist?(asciidoctor = (File.expand_path '../../lib/asciidoctor', __FILE__)) +asciidoctor = File.absolute_path '../lib/asciidoctor.rb', __dir__ +if File.exist? asciidoctor require asciidoctor + require File.join Asciidoctor::LIB_DIR, 'asciidoctor/cli' else require 'asciidoctor' + require 'asciidoctor/cli' end -require 'asciidoctor/cli' invoker = Asciidoctor::Cli::Invoker.new ARGV GC.start diff -Nru asciidoctor-1.5.5/bin/asciidoctor-safe asciidoctor-2.0.10/bin/asciidoctor-safe --- asciidoctor-1.5.5/bin/asciidoctor-safe 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/bin/asciidoctor-safe 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -#!/usr/bin/env ruby - -require 'rubygems' unless defined? Gem - -if File.exist?(asciidoctor = (File.expand_path '../../lib/asciidoctor', __FILE__)) - require asciidoctor -else - require 'asciidoctor' -end -require 'asciidoctor/cli' - -invoker = Asciidoctor::Cli::Invoker.new(ARGV + ['-S', 'safe']) -GC.start -invoker.invoke! 
-exit invoker.code diff -Nru asciidoctor-1.5.5/CHANGELOG.adoc asciidoctor-2.0.10/CHANGELOG.adoc --- asciidoctor-1.5.5/CHANGELOG.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/CHANGELOG.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,5 +1,5 @@ = Asciidoctor Changelog -:uri-asciidoctor: http://asciidoctor.org +:uri-asciidoctor: https://asciidoctor.org :uri-asciidoc: {uri-asciidoctor}/docs/what-is-asciidoc :uri-repo: https://github.com/asciidoctor/asciidoctor :icons: font @@ -8,26 +8,923 @@ :star: ★ endif::[] -{uri-asciidoctor}[Asciidoctor] is a _fast_, open source text processor and publishing toolchain for converting {uri-asciidoc}[AsciiDoc] content into HTML5, DocBook 5 (or 4.5) and other formats. +{uri-asciidoctor}[Asciidoctor] is a _fast_, open source text processor and publishing toolchain for converting {uri-asciidoc}[AsciiDoc] content into HTML 5, DocBook 5, and other formats. This document provides a high-level view of the changes introduced in Asciidoctor by release. For a detailed view of what has changed, refer to the {uri-repo}/commits/master[commit history] on GitHub. // tag::compact[] +== 2.0.10 (2019-05-31) - @mojavelinux + +Bug Fixes:: + + * fix Asciidoctor.convert_file to honor `header_footer: false` option when writing to file (#3316) + * fix placement of title on excerpt block (#3289) + * always pass same options to SyntaxHighlighter#docinfo, regardless of value of location argument + * fix signature of SyntaxHighlighter#docinfo method (#3300) + * when `icons` is set to `image`, enable image icons, but don't use it as the value of the `icontype` attribute (#3308) + +// end::compact[] +== 2.0.9 (2019-04-30) - @mojavelinux + +Bug Fixes:: + + * process multiple single-item menu macros in same line (#3279) + * register images in catalog correctly (#3283) + * rename AbstractNode#options method to AbstractNode#enabled_options so it doesn't get shadowed by Document#options (#3282) + * don't fail to convert document if alt attribute is not set on block or inline image (typically by an extension) + * fix lineno of source location on blocks that follow a detached list continuation (#3281) + * assume inline image type is "image" if not set (typically by an extension) + +== 2.0.8 (2019-04-22) - @mojavelinux + +Bug Fixes:: + + * restore background color applied to literal blocks by default stylesheet (#3258) + * use portability constants (CC_ALL, CC_ANY) in regular expressions defined in built-in converters (DocBook5 and ManPage) + * use portability constant (CC_ANY) in regular expression for custom inline macros + * use smarter margin collapsing for AsciiDoc table cell content; prevent passthrough content from being cut off (#3256) + * don't limit footnote ref to ASCII charset; allow any word character in Unicode to be used (#3269) + +Improvements:: + + * register_for methods accept arguments as symbols (#3274) + * use Concurrent::Map instead of Concurrent::Hash in template converter + * use module_function keyword to define methods in Helpers + * move regular expression definitions to separate source file (internal change) + +== 2.0.7 (2019-04-13) - @mojavelinux + +Bug Fixes:: + + * fix crash when resolving ID from text and at least one candidate contains an unresolved xref (#3254) + * fix compatibility with Rouge 2.0 + +Improvements:: + + * improve documentation for the `-a` CLI option; explain that `@` modifier can be placed at end of name as alternative to end of value + * move source for main API entry points (load, load_file, convert, convert_file) to separate 
files (internal change) + * define main API entry points (load, load_file, convert, convert_file) as module functions + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). + +== 2.0.6 (2019-04-04) - @mojavelinux + +Bug Fixes:: + + * assume implicit AsciiDoc extension on interdoc xref macro target with no extension (e.g., `document#`); restores 1.5.x behavior (#3231) + * don't fail to load application if call to Dir.home fails; use a rescue with fallback values (#3238) + * Helpers.rootname should only consider final path segment when dropping file extension + +Improvements:: + + * implement Helpers.extname as a more efficient and flexible File.extname method + * check for AsciiDoc file extension using end_with? instead of resolving the extname and using a lookup + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). + +== 2.0.5 (2019-04-01) - @mojavelinux + +Bug Fixes:: + + * fix crash when source highlighter is Rouge and source language is not set on block (#3223) + * update CLI and SyntaxHighlighter to allow Asciidoctor to load cleanly on Ruby 2.0 - 2.2 + * CLI should use $stdin instead of STDIN to be consistent with the use of $stdout + * mark encoding of stdio objects used in CLI as UTF-8 (#3225) + * make Asciidoctor::SyntaxHighlighter::Config.register_for method public as documented + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). + +== 2.0.4 (2019-03-31) - @mojavelinux + +Bug Fixes:: + + * allow Asciidoctor to load cleanly on Ruby 2.0 - 2.2 for distributions that provide support for these older Ruby versions + * make Asciidoctor::Converter::Config.register_for method public as documented + * remove unused Asciidoctor::Converter::BackendTraits#derive_backend_traits private method + * move Asciidoctor::Converter::BackendTraits.derive_backend_traits method to Asciidoctor::Converter + * mark render and render_file methods as deprecated in API docs + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). + +== 2.0.3 (2019-03-28) - @mojavelinux + +Bug Fixes:: + + * fix crash when attrlist is used on literal monospace phrase (#3216) + * update use of magic regexp variables to fix compatibility with Opal / Asciidoctor.js (#3214) + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). + +== 2.0.2 (2019-03-26) - @mojavelinux + +Bug Fixes:: + + * apply verbatim substitutions to literal paragraphs attached to list item (#3205) + * implement #lines and #source methods on Table::Cell based on cell text (#3207) + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). 
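The 2.0.7 notes above mention that the main API entry points (load, load_file, convert, convert_file) are now defined as module functions. As a point of reference, here is a minimal usage sketch of those entry points; the input file name is a placeholder and the options shown are only a small subset of what the API accepts.

[source,ruby]
----
require 'asciidoctor'

# convert a string to an embeddable HTML fragment
html = Asciidoctor.convert 'Hello, *AsciiDoc*!', safe: :safe

# convert a file to a standalone HTML document (standalone replaces the
# older header_footer option name in 2.0)
Asciidoctor.convert_file 'sample.adoc', safe: :safe, standalone: true

# load a file into a Document object to inspect it without converting
doc = Asciidoctor.load_file 'sample.adoc', safe: :safe
puts doc.doctitle
----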
+ +== 2.0.1 (2019-03-25) - @mojavelinux + +Bug Fixes:: + + * convert titles of cataloged block and section nodes containing attribute references eagerly to resolve attributes while in scope (#3202) + * customize MathJax (using a postfilter hook) to apply displaymath formatting to AsciiMath block (#2498) + * fix misspelling of deprecated default_attrs DSL function (missing trailing "s") + * remove unused location property (attr_accessor :location) on DocinfoProcessor class + * look for deprecated extension option :pos_attrs if :positional_attrs option is missing (#3199) + * add detail to load error message if path differs from gem name (#1884) + +Build / Infrastructure:: + + * bundle .yardopts in RubyGem (#3193) + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). + +== 2.0.0 (2019-03-22) - @mojavelinux + +Enhancements / Compliance:: + + * drop support for Ruby < 2.3 and JRuby < 9.1 and remove workarounds (#2764) + * drop support for Slim < 3 (#2998) + * drop the converter for the docbook45 backend (#3005) + * apply substitutions to section and block titles in normal substitution order (#1173) + * make syntax highlighter pluggable; extract all logic into adapter classes (#2106) + * add syntax highlighter adapter for Rouge (#1040) + * redesign Converter API based on SyntaxHighlighter API; remap deprecated API to new API to ensure compatibility (#2891) + * repurpose built-in converters as regular converters (#2891) + * make registration and resolution of global converters thread-safe (#2891) + * fold the default converter factory into the Converter module (#2891) + * add a default implementation for Converter#convert in the Base converter (#2891) + * rename Converter::BackendInfo to Converter::BackendTraits; map backend_info to new backend_traits method (#2891) + * allow built-in converter classes to be resolved using Converter#for and instantiated using Converter#create (#2891) + * allow converter factory to be passed using :converter_factory API option (#2891) + * honor htmlsyntax if defined on converter (#2891) + * add backend_traits_source keyword argument to CompositeConverter constructor (#2891) + * add support for start attribute when using prettify to highlight source blocks with line numbering enabled + * use String#encode to encode String as UTF-8 instead of using String#force_encoding (#2764) + * add FILE_READ_MODE, URI_READ_MODE, and FILE_WRITE_MODE constants to control open mode when reading files and URIs and writing files (#2764) + * set visibility of private and protected methods (#2764) + * always run docinfo processor extensions regardless of safe mode (gives control to extension) (#2966) + * use infinitive verb form for extension DSL method names; map deprecated method names where appropriate + * add docinfo insertion slot for header location to built-in converters (#1720) + * add support for the `muted` option on vimeo videos (allows autoplay to work in Chrome) (#3014) + * use value of prettify-theme attribute as is if it starts with http:// or https:// (#3020) + * allow icontype to be set using icons attribute (#2953) + * when using a server-side syntax highlighter, highlight content of source block even if source language is not set (#3027) + * automatically promote a listing block without an explicit style to a source block if language is set (#1117) + * remove the 2-character (i.e., `""`) quote block syntax + * don't allow block role to inherit from document attribute; only look for role in block 
attributes (#1944) + * split out functionality of -w CLI flag (script warnings) from -v CLI flag (verbose logging) (#3030) + * log possible invalid references at info level (#3030) + * log dropped lines at info level when attribute-missing=drop-line (#2861) + * honor attribute-missing setting when processing include directives and block macros (#2855) + * log warning when include directive is not resolved due to missing attribute or blank target; always include warning in output document (#2868) + * use the third argument of AbstractNode#attr / AbstractNode#attr? to set the name of a fallback attribute to look for on the document (#1934) + * change default value of third argument to Abstractnode#attr / AbstractNode#attr? to nil so attribute doesn't inherit by default (#3059) + * look for table-frame, table-grid, and table-stripes attributes on document as fallback for frame, grid, and stripes attributes on table (#3059) + * add support for hover mode for table stripes (stripes=hover) (#3110) + * always assume the target of a shorthand interdocument xref is a reference to an AsciiDoc document (source-to-source) (#3021) + * if the target of a formal xref macro has a file extension, assume it's a path reference (#3021) + * never assume target of a formal xref macro is a path reference unless a file extension or fragment is present (#3021) + * encode characters in URI to comply with RFC-3986 + * implement full support for styled xreftext in manpage converter (#3077) + * allow the ID and role properties to be set on a list item of ordered and unordered lists via the API (#2840) + * yield processor instance to registration block for document processor if block has non-zero arity (i.e., has parameters) + * add Document#parsed? method to check whether document has been parsed + * modify Cell class to extend from AbstractBlock instead of AbstractNode (#2963) + * implement block? and inline? 
methods on Column, both of which return false (#2963) + * drop verse table cell style (treat as normal table cell) (#3111) + * allow negated subs to be specified on inline pass macro (#2191) + * log warning if footnoteref macro is found and compat mode is not enabled (#3114) + * log info message if inline macro processor returns a String value (#3176) + * apply subs to Inline node returned by inline macro processor if subs attribute is specified (#3178) + * add create_inline_pass helper method to base extension processor class (#3178) + * log debug message instead of warning if block style is unknown (#3092) + * allow backend to delegate to a registered backend using the syntax synthetic:delegate when using custom templates (e.g., slides:html) (#891) + * AbstractBlock#find_by looks inside AsciiDoc table cells if traverse_documents selector option is true (#3101) + * AbstractBlock#find_by finds table cells, which can be selected using the :table_cell context in the selector (#2524) + * allow ampersand to be used in e-mail address (#2553) + * propagate ID assigned to inline passthrough (#2912) + * rename control keywords in find_by to better align with the standard NodeFilter terminology + * stop find_by iteration if filter block returns :stop directive + * rename header_footer option to standalone (while still honoring header_footer for backwards compatibility) (#1444) + * replace anchors and xrefs before footnotes (replace footnotes last in macros substitution group) + * apply substitution for custom inline macro before all other macros + * only promote index terms automatically (A, B, C becomes A > B > C + B > C + C) if indexterm-promotion option is set on document (#1487) + * add support for see and see-also on index terms; parse attributes on indexterm macros if text contains `=` (#2047) + * drop :indexterms table from document catalog (in preparation for solution to #450 in a 2.x release) + * load additional languages for highlight.js as defined in the comma-separated highlightjs-languages attribute (#3036) + * log warning if conditional expression in ifeval directive is invalid (#3161) + * drop lines that contain an invalid preprocessor directive (#3161) + * rename AbstractBlock#find_by directives; use :prune in place of :skip_children and :reject in place of :skip + * convert example block into details/summary tag set if collapsible option is set; open by default if open option is set (#1699) + * substitute replacements in author values used in document header (#2441) + * require space after semi-colon that separates multiple authors (#2441) + * catalog inline anchors at start of callout list items (#2818) (*@owenh000*) + * add parse_attributes helper method to base extension Processor class (#2134) + +Improvements:: + + * propagate document ID to DocBook output (#3011) + * always store section numeral as string; compute roman numeral for part at assignment time (@vmj) + * refactor code to use modern Hash syntax + * define LIB_DIR constant; rename *_PATH constants to *_DIR constants to be consistent with RubyGems terminology (#2764) + * only define ROOT_DIR if not already defined (for compatibility with Asciidoctor.js) + * move custom docinfo content in footer below built-in docinfo content in footer in HTML converter (#3017) + * read and write files using File methods instead of IO methods (#2995) + * value comparison in AbstractNode#attr? 
is only performed if expected value is truthy + * align default CodeRay style with style for other syntax highlighters (#2106) + * ensure linenos class is added to linenos column when source highlighter is pygments and pygments-css=style + * disable table stripes by default (#3110) + * rename CSS class of Pygments line numbering table to linenotable (to align with Rouge) (#1040) + * remove unused Converter#convert_with_options method (#2891) + * add -e, --embedded CLI flag as alias for -s, --no-header-footer (require long option to specify eRuby impl) (#1444) + * don't store the options attribute on the block once the options are parsed (#3051) + * add an options method on AbstractNode to retrieve the set of option names (#3051) + * pass :input_mtime option to Document constructor; let Document constructor assign docdate/time/year attributes (#3029) + * never mutate strings; add a `frozen_string_literal: true` magic comment to top of all Ruby source files (#3054) + * always use docdate and doctime to compute docyear and docdatetime (#3064) + * rename PreprocessorReader#exceeded_max_depth? to PreprocessorReader#exceeds_max_depth? and return nil if includes are disabled + * stop populating :ids table in document catalog (#3084) + * always use :refs table in document catalog to look for registered IDs (#3084) + * don't compute and store reference text in document catalog (#3084) + * populate reference text table lazily for resolving ID by reference text (#3084) + * don't store fallback reference text on :bibref node (#3085) + * call AbstractNode#reftext instead of AbstractNode#text to retrieve reference text for bibref node (#3085) + * only map unparsed attrlist of inline macro to target when format is short + * add clearer exception message when source data is binary or has invalid encoding (#2884) + * rename context for table cell and table column to :table_cell and :table_column, respectively + * rename hardbreaks document attribute to hardbreaks-option; retain hardbreaks as a deprecated alias (#3123) + * extend TLD for implicit e-mail addresses to 5 characters (#3154) + * truncate with precision (instead of rounding) when computing absolute width for columns in DocBook output (#3131) + * drop legacy LaTeX math delimiters (e.g, `$..$`) if present (#1339) + * use proper terminology in warning message about mismatched preprocessor directive (#3165) + * rename low-level extension attribute name :pos_attrs to :positional_attrs + * mark default_attrs extension DSL method deprecated in favor of default_attributes + * upgrade MathJax to 2.7.5 + +Bug Fixes:: + + * fix crash caused by inline passthrough macro with the macros sub clearing the remaining passthrough placeholders (#3089) + * fix crash if ifeval directive is missing expression (#3164) + * prevent relative leveloffset from making section level negative and causing hang (#3152) + * don't fail to parse Markdown-style quote block that only contains attribution line (#2989) + * enforce rule that Setext section title must have at least one alphanumeric character; fixes problem w/ block nested inside quote block (#3060) + * apply header subs to doctitle value when assigning it back to the doctitle document attribute (#3106) + * don't fail if value of pygments-style attribute is not recognized; gracefully fallback to default style (#2106) + * do not alter the $LOAD_PATH (#2764) + * fix crash if stem block is empty (#3118) + * remove conditional comment for IE in output of built-in HTML converter; fixes sidebar table of contents (#2983) + * fix 
styling of source blocks with linenums enabled when using prettify as syntax highlighter (#640) + * update default stylesheet to support prettify themes (#3020) + * remove hard-coded color values on source blocks in default stylesheet (#3020) + * add fallback if relative path cannot be computed because the paths are located on different drives (#2944) + * ignore explicit section level style (#1852) + * don't eat space before callout number in source block if line-comment attribute is empty (#3121) + * check if type is defined in a way that's compatible with autoload + * fix invalid check for DSL in extension class (previously always returned true) + * scope constant lookups (#2764) + * use byteslice instead of slice to remove BOM from string (#2764) + * don't fail if value of -a CLI option is empty string or equals sign (#2997) + * allow failure level of CLI to be set to info + * Reader#push_include should not fail if data is nil + * fix deprecated ERB trim mode that was causing warning (#3006) + * move time anchor after query string on vimeo video to avoid dropping options + * allow color for generic text, line numbers, and line number border to inherit from Pygments style (#2106) + * enforce and report relative include depth properly (depth=0 rather than depth=1 disables nested includes) + * allow outfilesuffix to be soft set from API (#2640) + * don't split paragraphs in table cell at line that resolves to blank if adjacent to other non-blank lines (#2963) + * initialize the level to WARN when instantiating the NullLogger + * next_adjacent_block should not fail when called on dlist item (#3133) + * don't suppress browser styles for summary tag; add pointer cursor and panel margin bottom (#3155) + * only consider TLDs in e-mail address that have ASCII alpha characters + * allow underscore in domain of e-mail address + +Build / Infrastructure:: + + * clear SOURCE_DATE_EPOCH env var when testing timezones (PR #2969) (*@aerostitch*) + * remove compat folder (removes the AsciiDoc Python config file that provides pseudo-compliance with Asciidoctor and a stylesheet for an old Font Awesome migration) + * add Ruby 2.6.0 to build matrix + * stop running CI job on unsupported versions of Ruby + * exclude test suite, build script, and Gemfile from gem (#3044) + * split build tasks out into individual files + +Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). 
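Several of the 2.0.0 entries above concern the AbstractBlock#find_by selector (the traverse_documents and :table_cell selector options, and the renamed filter directives :prune, :reject, and :stop). A brief sketch of how those pieces fit together is shown below; the document name is a placeholder, and only behavior described in the entries above is assumed.

[source,ruby]
----
require 'asciidoctor'

doc = Asciidoctor.load_file 'manual.adoc', safe: :safe, sourcemap: true

# select listing blocks, descending into AsciiDoc table cells as well
listings = doc.find_by context: :listing, traverse_documents: true

# a filter block may return :reject to drop a node and its descendants,
# :prune to keep the node but skip its descendants, or :stop to end traversal
shallow_sections = doc.find_by(context: :section) do |sect|
  sect.level > 1 ? :reject : true
end
----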
+ +== 1.5.8 (2018-10-28) - @mojavelinux + +Enhancements:: + + * if set, add value of part-signifier and chapter-signifier attributes to part and chapter titles (#2738) + * allow position (float) and alignment (align) to be set on video block (#2425) + * substitute attribute references in attrlist of include directive (#2761) + * add Document#set_header_attribute method for adding method directly to document header during parsing (#2820) + * add helper method to extension processor classes to create lists and list items + * allow ordered and unordered lists to be nested to an arbitrary / unlimited depth (#2854) + * add `prefer` DSL method to extension registry and document processor to flag extension as preferred (#2848) + * allow manname and manpurpose to be set using document attributes; don't look for NAME section in this case (#2810) + * substitute attribute references in target of custom block macro (honoring attribute-missing setting) (#2839) + * interpret `<.>` as an auto-numbered callout in verbatim blocks and callout lists (#2871) + * require marker for items in callout list to have circumfix brackets (e.g., `<1>` instead of `1>`) (#2871) + * preserve comment guard in front of callout number in verbatim block if icons is not enabled (#1360) + * add more conventional styles to quote block when it has the excerpt role (#2092) + * colspecs can be separated by semi-colon instead of comma (#2798) + * change AbstractBlock#find_by to respond to StopIteration exception; stop traversal after matching ID (#2900) + * change AbstractBlock#find_by to honor return values :skip and :skip_children from filter block to skip node and its descendants or just its descendants, respectively (#2067) + * add API to retrieve authors as array; use API in converters (#1042) (*@mogztter*) + * add support for start attribute on source block to set starting line number when converting to DocBook (#2915) + * track imagesdir for image on node and in catalog (#2779) + * allow starting line number to be set using start attribute when highlighting source block with Pygments or CodeRay (#1742) + * upgrade highlight.js to 9.13.1 + +Bug Fixes:: + + * don't hang on description list item that begins with /// (#2888) + * don't crash when using AsciiDoc table cell style on column in CSV table (#2817) + * show friendly error if CSV data for table contains unclosed quote (#2878) (*@zelivans*) + * don't crash when attribute entry continuation is used on last line of file (#2880) (*@zelivans*) + * treat empty/missing value of named block attribute followed by other attributes (e.g., caption=,cols=2*) as empty string + * AbstractNode#set_option does nothing if option is already set (PR #2778) + * allow revnumber to be an attribute reference in revision info line (#2785) + * use ::File.open instead of ::IO.binread in Reader for Asciidoctor.js compatibility + * add fallback for timezone when setting doctime + * preserve UNC path that begins with a double backslash (Windows) (#2869) + * fix formatting of quote block (indentation) in manpage output (#2792) + * catalog inline anchors in ordered list items (#2812) + * detect closing tag on last line with no trailing newline (#2830) + * process `!name@` attribute syntax properly; follow-up to #642 + * change document extension processor DSL methods to return registered extension instance instead of array of instances + * use fallback value for manname-title to prevent crash in manpage converter + * consolidate inner whitespace in prose in manpage output (#2890) + * only apply subs to 
node attribute value if enclosed in single quotes (#2905) + * don't hide URI scheme if target of link macro is a bare URI scheme + * fix crash when child section of part is out of sequence and section numbering is enabled (#2931) + * fix crash when restoring passthroughs if passthrough role is enclosed in single quotes (#2882, #2883) + * don't eagerly apply subs to inline attributes in general + * make sure encoding of output file is UTF-8 + * prevent warning about invalid `:asciidoc` option when using custom templates with Slim 4 (#2928) + * use Pathname#relative_path_from to compute relative path to file outside of base directory (#2108) + +Improvements:: + + * change trailing delimiter on part number to colon (:) (#2738) + * interpret open line range as infinite (#2914) + * rename number property on AbstractBlock to numeral, but keep number as deprecated alias + * use CSS class instead of hard-coded inline float style on tables and images (#2753) + * use CSS class instead of hard-coded inline text-align style on block images (#2753) + * allow hyphen to be used in custom block macro name as long as it's not the first character (#2620) + * use shorthands %F and %T instead of %Y-%m-%d and %H:%M:%S to format time + * read file in binary mode whenever contents are being normalized + * use .drop(0) to duplicate arrays (roughly 1.5x as fast as .dup) + * only recognize a bullet glyph which is non-repeating as an unordered list marker + * rename SyntaxDsl module to SyntaxProcessorDsl (internal) + * fail if name given to block macro contains illegal characters + * normalize all whitespace in value of manpurpose attribute + * make space before callout number after custom line comment character optional + * parse attrlist on inline passthrough as a shorthand attribute syntax or literal role (#2910) + * add support for range syntax (.. 
delimiter) to highlight attribute on source block (#2918) + * add support for unbounded range to highlight attribute on source block (#2918) + * automatically assign title and caption on image block if title is set on custom block source (#2926) + * use OS independent timezone (UTC or time offset) in doctime and localtime attributes (#2770) + * report correct line number for inline anchor with id already in use (#2769) + * generate manpage even if input is non-conforming or malformed (#1639) + * allow authorinitials for single author to be overridden (#669) + +Documentation:: + + * translate README into German (#2829) (*@jwehmschulte*) + * sync French translation of README (*@mogztter*) + * add Swedish translation of built-in attributes (PR #2930) (*@jonasbjork*) + +Build / Infrastructure:: + + * replace thread_safe with concurrent-ruby (PR #2822) (*@junaruga*) + +== 1.5.7.1 (2018-05-10) - @mojavelinux + +Bug Fixes:: + + * fix regression where block attributes where being inherited by sibling blocks in a complex list item (#2771) + * don't apply lead styling to first paragraph in nested document (AsciiDoc table cell) if role is present (#2624) + +Build / Infrastructure:: + + * drop obsolete logic in rake build (*@aerostitch*) + * allow lib dir to be overridden for tests using an environment variable (PR #2758) (*@aerostitch*) + * load asciidoctor/version from LOAD_PATH in gemspec if not found locally (PR #2760) (*@aerostitch*) + +== 1.5.7 (2018-05-02) - @mojavelinux + +Enhancements:: + + * BREAKING: drop XML tags, character refs, and non-word characters (except hyphen, dot, and space) when auto-generating section IDs (#794) + ** hyphen, dot, and space are replaced with value of idseparator, if set; otherwise, spaces are dropped + * allow attribute names to contain any word character defined by Unicode (#2376, PR #2393) + * do not recognize attribute entry line if name contains a colon (PR #2377) + * route all processor messages through a logger instead of using Kernel#warn (#44, PR #2660) + * add MemoryLogger for capturing messages sent to logger into memory (#44, PR #2660) + * add NullLogger to prevent messages from being logged (#44, PR #2660) + * log message containing source location / cursor as an object; provides more context (#44, PR #2660) + * pass cursor for include file to `:include_location` key in message context (PR #2729) + * add `:logger` option to API to set logger instance (#44, PR #2660) + * add `--failure-level=LEVEL` option to CLI to force non-zero exit code if specified logging level is reached (#2003, PR #2674) + * parse text of xref macro as attributes if attribute signature found (equal sign) (#2381) + * allow xrefstyle to be specified per xref by assigning the xrefstyle attribute on the xref macro (#2365) + * recognize target with .adoc extension in xref macro as an interdocument xref + * resolve nested includes in remote documents relative to URI (#2506, PR #2511) + * allow `relfilesuffix` attribute to control file extension used for interdoc xrefs (#1273) + * support `!name@` (preferred), `!name=@`, `name!@`, and `name!=@` syntax to soft unset attribute from API or CLI (#642, PR #2649) + * allow modifier to be placed at end of name to soft set an attribute (e.g., `icons@=font`) (#642, PR #2649) + * interpret `false` attribute value defined using API as a soft unset (#642, PR #2649) + * number parts if `partnums` attribute is set (#2298) + * allow footnote macro to define or reference footnote reference (footnoteref macro now deprecated) (#2347, PR #2362) + * 
allow custom converter to be used with custom templates; converter must declare that it supports templates (#2619) + * add syntax help topic to CLI (`-h syntax`) (#1573) + * allow manpage path for manpage help topic to be specified using ASCIIDOCTOR_MANPAGE_PATH environment variable (PR #2653) (*@aerostitch*) + * if manpage cannot be found in default path inside gem, use `man -w asciidoctor` to resolve installed path (PR #2653) + * uncompress contents of manpage for manpage help topic if path ends with .gz (PR #2653) (*@aerostitch*) + * define source and manual refmiscinfo entries in manpage output if manual and source attributes are defined (PR #2636) (*@tiwai*) + * add syntax for adding hard line breaks in block AsciiMath equations (#2497, PR #2579) (*@dimztimz*) + * add positioning option to sectanchors attribute (sectanchors=before or sectanchors=after) (#2485, PR #2486) + * allow table striping to be configured using stripes attribute (even, odd, all, or none) or stripes roles on table (#1365, PR #2588) + * recognize `ends` as an alias to `topbot` for configuring the table frame + * add rel=nofollow property to links (text or image) when nofollow option is set (#2605, PR #2692) + * populate Document#source_location when sourcemap option is enabled (#2478, PR #2488) + * populate source_location property on list items when sourcemap option is set on document (PR #2069) (*@mogztter*) + * populate Table::Cell#source_location when sourcemap option is enabled (#2705) + * allow local include to be flagged as optional by setting optional option (#2389, PR #2413) + * allow block title to begin with a period (#2358, PR #2359) + * catalog inline anchor at start of list items in ordered and unordered lists, description list terms, and table cells (#2257) + * register document in catalog if id is set; assign reftext to document attributes if specified in a block attribute line (#2301, PR #2428) + * allow automatic width to be applied to individual columns in a table using the special value `~` (#1844) + * use the quote element in DocBook converter to represent smart quotes (#2272, PR #2356) (@bk2204) + * parse and pass all manpage names to output document master (i.e., shadow man pages) (#1811, #2543, PR #2414) + * parse credit line of shorthand quote block as block attributes; apply normal subs to credit line in shorthand quote blocks (#1667, PR #2452) + * populate copyright element in DocBook output from value of copyright attribute (#2728) + * preserve directories if source dir and destination dir are set (#1394, PR #2421) + * allow linkcss to be unset from API or CLI when safe mode is secure + * convert quote to epigraph element in DocBook output if block has epigraph role (#1195, PR #2664) (*@bk2204*) + * number special sections in addition to regular sections when sectnums=all (#661, PR #2463) + * upgrade to Font Awesome 4.7.0 (#2569) + * upgrade to MathJax 4.7.4 + +Bug Fixes:: + + * set `:to_dir` option value correctly when output file is specified (#2382) + * preserve leading indentation in contents of AsciiDoc table cell if contents starts with a newline (#2712) + * the shorthand syntax on the style to set block attributes (id, roles, options) no longer resets block style (#2174) + * match include tags anywhere on line as long as offset by word boundary on left and space or newline on right (#2369, PR #2683) + * warn if an include tag specified in the include directive is unclosed in the included file (#2361, PR #2696) + * use correct parse mode when parsing blocks attached to list item 
(#1926) + * fix typo in gemspec that removed README and CONTRIBUTING files from the generated gem (PR #2650) (*@aerostitch*) + * preserve id, role, title, and reftext on open block when converting to DocBook; wrap in `` or `` (#2276) + * don't turn bare URI scheme (no host) into a link (#2609, PR #2611) + * don't convert inter-document xref to internal anchor unless entire target file is included into current file (#2200) + * fix em dash replacement in manpage converter (#2604, PR #2607) + * don't output e-mail address twice when replacing bare e-mail address in manpage output (#2654, PR #2665) + * use alternate macro for monospaced text in manpage output to not conflict w/ AsciiDoc macros (#2751) + * enforce that absolute start path passed to PathResolver#system_path is inside of jail path (#2642, PR #2644) + * fix behavior of PathResolver#descends_from? when base path equals / (#2642, PR #2644) + * automatically recover if start path passed to PathResolver#system_path is outside of jail path (#2642, PR #2644) + * re-enable left justification after invoking tmac URL macro (#2400, PR #2409) + * don't report warning about same level 0 section multiple times (#2572) + * record timings when calling convert and write on Document (#2574, PR #2575) + * duplicate header attributes when restoring; allows header attributes to be restored an arbitrary number of times (#2567, PR #2570) + * propagate `:catalog_assets` option to nested document (#2564, PR #2565) + * preserve newlines in quoted CSV data (#2041) + * allow opening quote around quoted CSV field to be on a line by itself + * output table footer after body rows (#2556, PR #2566) (*@PauloFrancaLacerda*) + * move @page outside of @media print in default stylesheet (#2531, PR #2532) + * don't throw exception if text of dd node is nil (#2529, PR #2530) + * don't double escape ampersand in manpage output (#2525) (*@dimztimz*) + * fix crash when author_1 attribute is assigned directly (#2481, PR #2487) + * fix CSS for highlighted source block inside colist (#2474, PR #2490) + * don't append file extension to data uri of admonition icon (#2465, PR #2466) + * fix race condition in Helpers.mkdir_p (#2457, PR #2458) + * correctly process nested passthrough inside unconstrained monospaced (#2442, PR #2443) + * add test to ensure ampersand in author line is not double escaped (#2439, PR #2440) + * prevent footnote ID from clashing with auto-generated footnote IDs (#2019) + * fix alignment of icons in footnote (#2415, PR #2416) + * add graceful fallback if pygments.rb fails to return a value (#2341, PR #2342) + * escape specialchars in source if pygments fails to highlight (#2341) + * do not recognize attribute entry line if name contains colon (PR #2377) + * allow flow indexterm to be enclosed in round brackets (#2363, PR #2364) + * set outfilesuffix to match file extension of output file (#2258, PR #2367) + * add block title to dlist in manpage output (#1611, PR #2434) + * scale text to 80% in print styles (#1484, PR #2576) + * fix alignment of abstract title when using default stylesheet (PR #2732) + * only set nowrap style on table caption for auto-width table (#2392) + * output non-breaking space for man manual if absent in DocBook output (PR #2636) + * don't crash if stem type is not recognized (instead, fallback to asciimath) + +Improvements / Refactoring:: + + * BREAKING: rename table spread role to stretch (#2589, PR #2591) + * use cursor marks to track lines more accurately; record cursor at the start of each block, list item, or table cell (PR 
#2701, PR #2547) (*@seikichi*) + * log a warning message if an unterminated delimited block is detected (#1133, PR #2612) + * log a warning when nested section is found inside special section that doesn't support nested sections (#2433, PR #2672) + * read files in binary mode to disable automatic endline coercion (then explicitly coerce to UTF-8) (PR #2583, PR #2694) + * resolve / expand parent references in start path passed to PathResolver#system_path (#2642, PR #2644) + * update PathResolver#expand_path to resolve parent references (#2642, PR #2644) + * allow start path passed to PathResolver#system_path to be outside jail if target brings resolved path back inside jail (#2642, PR #2644) + * don't run File.expand_path on Dir.pwd (assume Dir.pwd is absolute) (#2642, PR #2644) + * posixify working_dir passed to PathResolver constructor if absolute (#2642, PR #2644) + * optimize detection for footnote* and indexterm* macros (#2347, PR #2362) + * log a warning if a footnote reference cannot be resolved (#2669) + * set logger level to DEBUG when verbose is enabled + * coerce value of `:template_dirs` option to an Array (PR #2621) + * make block roles specified using shorthand syntax additive (#2174) + * allow paragraph to masquerade as open block (PR #2412) + * move callouts into document catalog (PR #2394) + * document ID defined in block attribute line takes precedence over ID defined inside document title line + * don't look for link and window attributes on document when resolving these attributes for an image + * when linkattrs is set, only parse attributes in link macro if equals is present + * skip line comments in name section of manpage (#2584, PR #2585) + * always activate extension registry passed to processor (PR #2379) + * skip extension registry activation if no groups are registered (PR #2373) + * don't apply lead styling to first paragraph if role is present (#2624, PR #2625) + * raise clearer exception when extension class cannot be resolved (#2622, PR #2623) + * add methods to read results from timings (#2578, PR #2580) + * collapse bottom margin of last block in AsciiDoc table cell (#2568, PR #2593) + * set authorcount to 0 if there are no authors (#2519, PR #2520) + * validate fragment of interdoc xref that resolves to current doc (#2448, PR #2449) + * put id attribute on tag around phrase instead of preceding anchor (#2445, PR #2446) + * add .plist extension to XML circumfix comment family (#2430, PR #2431) (*@akosma*) + * alias Document#title method to no args Document#doctitle method (#2429, PR #2432) + * upgrade missing or unreadable include file to an error (#2424, PR #2426) + * add compliance setting to disable natural cross references (#2405, PR #2460) + * make hash in inter-document xref target optional if target has extension (#2404, PR #2406) + * add CSS class to part that matches role (#2401, PR #2402) + * add fit-content class to auto-width table (#2392) + * automatically assign parent reference when adding node to parent (#2398, PR #2403) + * leave inline anchor in section title as is if section has ID (#2243, PR #2427) + * align and improve error message about invalid use of partintro between HTML5 and DocBook converters + * rephrase warning when level 0 sections are found and the doctype is not book + * report correct line number when duplicate bibliography anchor is found + * only warn if thread_safe gem is missing when using built-in template cache + * rename enumerate_section to assign_numeral; update API docs + * drop deprecated compact option from CLI; 
remove from manpage + * use more robust mechanism for lazy loading the asciimath gem + * use consistent phrase to indicate the processor is automatically recovering from a problem + * change Reader#skip_comment_lines to not return skipped lines + * add styles to default stylesheet for display on Kindle (kf8) devices (PR #2475) + * purge render method from test suite (except to verify alias) + +Documentation:: + + * translate 'section-refsig' for German language (PR #2633) (*@ahus1*) + * synchronize French README with English version (PR #2637) (*@flashcode*) + +Build / Infrastructure:: + + * create an official logo for the project (#48) (*@mmajko*) + * update Ruby versions in appveyor build matrix (PR #2388) (*@miltador*) + * add mailinglist, changelog, source, and issues URI to gem spec + * allow blocks and substitutions tests to be run directly + * asciidoctor formula now available for Homebrew (*@zmwangx*) + +Distribution Packages:: + + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] + * https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] + +== 1.5.6.2 (2018-03-20) - @mojavelinux + +Bug Fixes:: + + * fix match for multiple xref macros w/ implicit text in same line (#2450) + * PathResolver#root? returns true for absolute URL in browser env (#2595) + +Improvements / Refactoring:: + + * resolve include target correctly in browser (xmlhttprequest IO module) (#2599, #2602) + * extract method to resolve include path (allowing Asciidoctor.js to override) (#2610) + * don't expand docdir value passed to API (#2518) + * check mandatory attributes when creating an image block (#2349, PR #2355) (*@mogztter*) + * drop is_ prefix from boolean methods in PathResolver (PR #2587) + * change Reader#replace_next_line to return true + * organize methods in AbstractNode + +Build / Infrastructure:: + + * clean up dependencies + * add Ruby 2.5.0 to CI build matrix (PR #2528) + * update nokogiri to 1.8.0 for ruby >= 2.1 (PR #2380) (*@miltador*) + +Distribution Packages:: + + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] + +https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.6.2[issues resolved] | +https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.6.2[git tag] | +https://github.com/asciidoctor/asciidoctor/compare/v1.5.6.1...v1.5.6.2[full diff] + +== 1.5.6.1 (2017-07-23) - @mojavelinux + +Enhancements:: + + * Don't include title of special section in DocBook output if untitled option is set (e.g., dedication%untitled) + +Bug Fixes:: + + * continue to read blocks inside a delimited block after content is skipped (PR #2318) + * don't create an empty paragraph for skipped content inside a delimited block (PR #2319) + * allow the subs argument of Substitutors#apply_subs to be nil + * coerce group name to symbol when registering extension (#2324) + * eagerly substitute 
attributes in target of inline image macro (#2330) + * don't warn if source stylesheet can't be read but destination already exists (#2323) + * track include path correctly if path is absolute and outside of base directory (#2107) + * preprocess second line of setext section title (PR #2321) + * preprocess second line of setext discrete heading (PR #2332) + * return filename as relative path if filename doesn't share common root with base directory (#2107) + +Improvements / Refactoring:: + + * change default text for inter-document xref (PR #2316) + * add additional tests to test behavior of Reader#peek_lines + * parse revision info line correctly that only has version and remark; add missing test for scenario + * rename AtxSectionRx constant to AtxSectionTitleRx for consistency with SetextSectionTitleRx constant + * use terms "atx" and "setext" to refer to section title syntax (PR #2334) + * rename HybridLayoutBreakRx constant to ExtLayoutBreakRx + * change terminology from "floating title" to "discrete heading" + * consolidate skip blank lines and check for end of reader (PR #2325) + * have Reader#skip_blank_lines report end of file (PR #2325) + * don't mix return type of Parser.build_block method (PR #2328) + * don't track eof state in reader (PR #2320) + * use shift instead of advance to consume line when return value isn't needed (PR #2322) + * replace terminology "floating title" with "discrete heading" + * remove unnecessary nil_or_empty? checks in substitutor + * leverage built-in assert / refute methods in test suite + +Build / Infrastructure:: + + * config Travis CI job to release gem (PR #2333) + * add SHA1 hash to message used for triggered builds + * trigger build of AsciidoctorJ on every change to core + * trigger build of Asciidoctor Diagram on every change to core + +Distribution Packages:: + + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] + * https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] + +https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.6.1[issues resolved] | +https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.6.1[git tag] | +https://github.com/asciidoctor/asciidoctor/compare/v1.5.6\...v1.5.6.1[full diff] + +== 1.5.6 (2017-07-12) - @mojavelinux + +Enhancements:: + + * use custom cross reference text if xrefstyle attribute is set (full, short, basic) (#858, #1132) + * store referenceable nodes under refs key in document catalog (PR #2220) + * apply reftext substitutions (specialchars, quotes, replacements) to value returned by reftext method (PR #2220) + * add xreftext method to AbstractBlock, Section, and Inline to produce formatted text for xref (PR #2220) + * introduce attributes chapter-refsig, section-refsig, and appendix-refsig to set reference signifier for chapter, section, and appendix, respectively (PR #2220) + * add rel="noopener" to links that target _blank or when noopener option is set (#2071) + * add option to exclude tags when including a file (#1516) + * add meta for shortcut icon if favicon attribute is set (#1574) + * allow use of linenums option to enable line numbers on a source block (#1981) + * allow extension groups to be 
unregistered individually (#1701) + * catalog bibliography anchors and capture reftext (#560, #1562) + * automatically add bibliography style to unordered list in bibliography section (#1924) + * disable startinline option when highlighting PHP if mixed option is set on source block (PR #2015) (*@ricpelo*) + * configure Slim to resolve includes in specified template dirs (#2214) + * dump manpage when -h manpage flag is passed to CLI (#2302) + * add resolves_attributes method to DSL for macros (#2122) + * invoke convert on result of custom inline macro if value is an inline node (#2132) + * resolve attributes for custom short inline macros if requested (#1797) + * add convenience method to create section from extension; use same initialization logic as parser (#1957) + * add handles? method to DSL for IncludeProcessor (#2119) + * pass through preload attribute to video tag (#2046) + * add start and end times for audio element (#1930) + * set localyear and docyear attributes (#1372) + * pass cloaked context to block extension via cloaked-context attribute (#1606) + * add support for covers in DocBook 5 converter (#1939) + * accept named pipe (fifo) as the input file (#1948) + * add AbstractBlock#next_adjacent_block helper method + * rename Document#references to catalog; alias references to catalog (PR #2237) + * rename extensions_registry option to extension_registry + * rename Extensions.build_registry method to create + * autoload extensions source file when Asciidoctor::Extensions is referenced (PR #2114, PR #2312) + * apply default_attrs to custom inline macro (PR #2127) + * allow tab separator for table to be specified using \t (#2073) + * add Cell#text= method + +Improvements:: + + * significant improvements to performance, especially in parser and substitutors + * process include directive inside text of short form preprocessor conditional (#2146) + * add support for include tags in languages that only support only circumfix comments (#1729) + * allow spaces in target of block image; target must start and end with non-space (#1943) + * add warning in verbose mode if xref is not found (#2268) (*@fapdash*) + * add warning if duplicate ID is detected (#2244) + * validate that output file will not overwrite input file (#1956) + * include docfile in warning when stylesheet cannot be read (#2089) + * warn if doctype=inline is used and block has unexpected content model (#1890) + * set built-in docfilesuffix attribute (#1673) + * make sourcemap field on Document read/write (#1916) + * allow target of xref to begin with attribute reference (#2007) + * allow target of xref to be expressed with leading # (#1546) + * allow kbd and btn macros to wrap across multiple lines (#2249) + * allow menu macro to span multiple lines; unescape escaped closing bracket + * make menu macro less greedy + * allow ampersand to be used as the first character of the first segment of a menu (#2171) + * enclose menu caret in HTML tag (#2165) + * use black text for menu reference; tighten word spacing (#2148) + * fix parsing of keys in kbd macro (PR #2222) + * add support for the window option for the link on a block image (#2172) + * set correct level for special sections in parser (#1261) + * always set numbered property on appendix to true + * store number for formal block on node (#2208) + * set sectname of header section to header (#1996) + * add the remove_attr method to AbstractNode (#2227) + * use empty string as default value for set_attr method (#1967) + * make start argument to system_path optional (#1965) 
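The 1.5.6 entries above introduce the `xrefstyle` attribute (full, short, basic) for formatted cross reference text. A minimal, illustrative Ruby sketch of how it is typically exercised follows; it is not taken from the changelog, the sample document and its `_usage` section ID are invented for this example, and the exact link text depends on the Asciidoctor version and backend in use.

[source,ruby]
----
require 'asciidoctor'

# Hypothetical two-section document; with :sectnums: and :xrefstyle: full set,
# the <<_usage>> xref should pick up the section number and quoted title
# (for example, something like: Section 2, "Usage") instead of the bare title.
sample = <<~'ADOC'
  :sectnums:
  :xrefstyle: full

  == Getting Started

  See <<_usage>> for the command reference.

  == Usage
ADOC

puts Asciidoctor.convert(sample, safe: :safe)
----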
+ * allow API to control subs applied to ListItem text (#2035) + * allow text of ListItem to be assigned (in an extension) (#2033) + * make generate_id method on section a static method (#1929) + * validate name of custom inline macro; cache inline macro rx (#2136) + * align number in conum list to top by default (#1999) + * fix CSS positioning of interactive checkbox (#1840) + * fix indentation of list items when markers are disabled (none, no-bullet, unnumbered, unstyled) (PR #2286) + * instruct icon to inherit cursor if inside a link + * close all files opened internally (#1897) + * be more precise about splitting kbd characters (#1660) + * rename limit method on String to limit_bytesize (#1889) + * leverage Ruby's match? method to speed up non-capturing regexps (PR #1938) + * preserve inline break in manpages (*@letheed*) + * check for presence of SOURCE_DATE_EPOCH instead of value; fail if value is malformed + * add Rows#by_section method to return table sections (#2219) + * cache which template engines have been loaded to avoid unnecessary processing + * rename assign_index method to enumerate_section (PR #2242) + * don't process double quotes in xref macro (PR #2241) + * optimize attr and attr? methods (PR #2232) + * use IO.write instead of File.open w/ block; backport for Opal + * backport IO.binread to Ruby 1.8.7 to avoid runtime check + * cache backend and doctype values on document + * allow normalize option to be set on PreprocessorReader; change default to false + * move regular expression constants for Opal to Asciidoctor.js build (PR #2070) + * add missing comma in warning message for callout list item out of sequence + * combine start_with? / end_with? checks into a single method call + * rename UriTerminator constant to UriTerminatorRx + * promote subs to top-level constants; freeze arrays + * rename PASS_SUBS constant to NONE_SUBS + * rename EOL constant to LF (retain EOL as alias) + * rename macro regexp constants so name follows type (e.g., InlineImageMacroRx) + +Compliance:: + + * retain block content in items of callout list when converting to HTML and man page (#1478) + * only substitute specialchars for content in literal table cells (#1912) + * fix operator logic for ifndef directive with multiple attributes (#1983) + * only recognize uniform underline for setext section title (#2083) + * don't match headings with mixed leading characters (#2074) + * fix layout break from matching lines it shouldn't + * fix behavior of attribute substitution in docinfo content (PR #2296) + * encode spaces in URI (PR #2274) + * treat empty string as a valid block title + * preprocess lines of a simple block (#1923) + * don't drop trailing blank lines when splitting source into lines (PR #2045) + * only drop known AsciiDoc extensions from the inter-document xref path (#2217) + * don't number special sections or special subsections by default (#2234) + * assign sectname based on name of manuscript element (#2206) + * honor leveloffset when resolving implicit doctitle (#2140) + * permit leading, trailing, and repeat operators in target of preprocessor conditional (PR #2279) + * don't match link macro in block form (i.e., has two colons after prefix) (#2202) + * do not match bibliography anchor that begins with digit (#2247) + * use [ \t] (or \s) instead of \p{Blank} to match spaces (#2204) + * allow named entity to have trailing digits (e.g., there4) (#2144) + * only assign style to image alt text if alt text is not specified + * substitute replacements in non-generated alt text of 
block image (PR #2285) + * keep track of whether alt text is auto-generated by assigning default-alt attribute (PR #2287) + * suppress info element in docbook output if noheader attribute is set (#2155) + * preserve leading indentation in literal and verse table cells (#2037) + * preserve whitespace in literal and verse table cells (#2029) + * set doctype-related attributes in AsciiDoc table cell (#2159) + * fix comparison logic when preprocessing first line of AsciiDoc table cell + * set filetype to man when backend is manpage (#2055) + * respect image scaling in DocBook converter (#1059) + * share counters between AsciiDoc table cells and main document (#1942) + * generate ID for floating title from converted title (#2016) + * split "treeprocessor" into two words; add aliases for compatibility (PR #2179) + * allow trailing hyphen in attribute name used in attribute reference + * allow escaped closing bracket in text of xref macro + * process pass inline macro with empty text; invert extract logic + * drop support for reftext document attribute (must be specified on node) + * fix compliance with Haml >= 5 (load Haml eagerly; remove ugly option) + * don't match inline image macro if target contains endline or leading or trailing spaces + * assign id instead of target on ref/bibref node (PR #2307) + * remove regexp hacks for Opal (#2110) + * drop outdated quoting exceptions for Opal (PR #2081) + +Bug Fixes:: + + * don't allow table borders to cascade to nested tables (#2151) + * escape special characters in reftext of anchor (#1694) + * sanitize content of authors meta tag in HTML output (#2112) + * use correct line number in warning for invalid callout item reference (#2275) + * fix stray marks added when unescaping unconstrained passthroughs (PR #2079) + * don't confuse escaped quotes in CSV data as enclosing quotes (#2008) + * don't activate implicit header if cell in first line of table contains a blank line (#1284, #644) + * allow compat-mode in AsciiDoc table cell to inherit from parent document (#2153) + * manify all normal table cell content (head, body, foot) in manpage output + * add missing newline after table caption in manpage output (#2253) + * correctly format block title on video in manpage output + * don't crash if substitution list resolves to nil (#2183) + * fail with informative message if converter cannot be resolved (#2161) + * fix regression of not matching short form of custom block macro + * encode double quotes in image alt text when used in an attribute (#2061) + * encode double quote and strip XML tags in value of xreflabel attribute in DocBook converter (PR #2220) + * fix typo in base64 data (PR #2094) (*@mogztter*) + * permit pass macro to surround a multi-line attribute value with hard line breaks (#2211) + * fix sequential inline anchor macros with empty reftext (#1689) + * don't mangle compound names when document has multiple authors (#663) + * don't drop last line of verbatim block if it contains only a callout number (#2043) + * prevent leading & trailing round brackets from getting caught in indexterm (#1581) + * remove cached title when title is set on block (#2022) + * remove max-width on the callout number icon (#1895) + * eagerly add hljs class for highlight.js (#2221) + * fix SOURCE_DATE_EPOCH lookup in Opal + * fix paths with file URI scheme are inevitably absolute (PR #1925) (*@mogztter*) + * only resolve file URLs when JavaScript IO module is xmlhttprequest (PR #1898) (*@mogztter*) + * fix formatting of video title in manpage converter + * don't 
increment line number if peek_lines overruns buffer (fixes some cases when line number is off) + * freeze extension processor instance, not class + * fix numbering bug in reindex_sections + * handle cases when there are no lines for include directive to select + +Documentation:: + + * enable admonition icons in README when displayed on GitHub + * add German translation of chapter-label (PR #1920) (*@fapdash*) + * add Ukrainian translation of built-in attributes (PR #1955) (*@hedrok*) + * add Norwegian Nynorsk translation; updated Norwegian Bokmål translation of built-in attributes (PR #2142) (*@huftis*) + * add Polish translation of built-in attributes (PR #2131) (*@ldziedziul*) + * add Romanian translation of built-in attributes (PR #2125) (*@vitaliel*) + * fix Japanese translation of built-in attributes (PR #2116) (*@haradats*) + * add Bahasa Indonesia translation of built-in labels (*@triyanwn*) + +Build / Infrastructure:: + + * upgrade highlight.js to 9.12.0 (#1652) + * include entire test suite in gem (PR #1952) (*@voxik*) + * upgrade Slim development dependency to 3.0.x (PR #1953) (*@voxik*) + * upgrade Haml development dependency to 5.0.x + * upgrade Nokogiri to 1.6.x (except on Ruby 1.8) (PR #1213) + * add Ruby 2.4 to CI test matrix (PR #1980) + * upgrade cucumber and JRuby in CI build (PR #2005) + * fix reference to documentation in attributes.adoc (PR #1901) (*@stonio*) + * trap and verify all warnings when tests are run with warnings enabled + * set default task in build to test:all + * configure run-tests.sh script to run all tests + * configure feature tests to only show progress + * configure Slim in feature tests to use html as format instead of deprecated html5 + * lock version of yard to fix invalid byte sequence in Ruby 1.9.3 + * modify rake build to trigger dependent builds (specifically, Asciidoctor.js) (PR #2305) (*@mogztter*) + +Distribution Packages:: + + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] + +https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.6[issues resolved] | +https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.6[git tag] | +https://github.com/asciidoctor/asciidoctor/compare/v1.5.5\...v1.5.6[full diff] + == 1.5.5 (2016-10-05) - @mojavelinux Enhancements:: + * Add preference to limit the maximum size of an attribute value (#1861) - * Honor SOURCE_DATE_EPOCH environment variable to accommodate reproducible builds (@JojoBoulix) (#1721) + * Honor SOURCE_DATE_EPOCH environment variable to accommodate reproducible builds (#1721) (*@JojoBoulix*) * Add reversed attribute to ordered list if reversed option is enabled (#1830) * Add support for additional docinfo locations (e.g., :header) * Configure default stylesheet to break monospace word if exceeds length of line; add roles to prevent breaks (#1814) - * Introduce translation file for built-in labels (@ciampix) - * Provide translations for built-in labels (@JmyL - kr, @ciampix - it, @ivannov - bg, @maxandersen - da, @radcortez - pt, @eddumelendez - es, @leathersole - jp, @aslakknutsen - no, @shahryareiv - fa, @AlexanderZobkov - ru, @dongwq - zh, @rmpestano - pt_BR, @ncomet - fr, @lgvz - fi, @patoi - hu, @BojanStipic - sr, @fwilhe - de,
@rahmanusta - tr, @abelsromero - ca, @aboullaite - ar, @roelvs - nl) - * Translate README to Chinese (@diguage) - * Translate README to Japanese (@Mizuho32) + * Introduce translation file for built-in labels (*@ciampix*) + * Provide translations for built-in labels (*@JmyL* - kr, *@ciampix* - it, *@ivannov* - bg, *@maxandersen* - da, *@radcortez* - pt, *@eddumelendez* - es, *@leathersole* - jp, *@aslakknutsen* - no, *@shahryareiv* - fa, *@AlexanderZobkov* - ru, *@dongwq* - zh, *@rmpestano* - pt_BR, *@ncomet* - fr, *@lgvz* - fi, *@patoi* - hu, *@BojanStipic* - sr, *@fwilhe* - de, *@rahmanusta* - tr, *@abelsromero* - ca, *@aboullaite* - ar, *@roelvs* - nl) + * Translate README to Chinese (*@diguage*) + * Translate README to Japanese (*@Mizuho32*) Improvements:: + * Style nested emphasized phrases properly when using default stylesheet (#1691) * Honor explicit table width even when autowidth option is set (#1843) * Only explicit noheader option on table should disable implicit table header (#1849) @@ -37,22 +934,23 @@ * Enable font smoothing on Firefox on OSX (#1837) * Support combined use of sectanchors and sectlinks in HTML5 output (#1806) * fix API docs for find_by - * Upgrade to Font Awesome 4.6.3 (@allenan, @mogztter) (#1723) + * Upgrade to Font Awesome 4.6.3 (#1723) (*@allenan*, *@mogztter*) * README: add install instructions for Alpine Linux * README: Switch yum commands to dnf in README * README: Mention Mint as a Debian distro that packages Asciidoctor - * README: Add caution advising against using gem update to update a system-managed gem (@oddhack) - * README: sync French version with English version (@flashcode) + * README: Add caution advising against using gem update to update a system-managed gem (*@oddhack*) + * README: sync French version with English version (*@flashcode*) * Add missing endline after title element when converting open block to HTML * Move list_marker_keyword method from AbstractNode to AbstractBlock * Rename definition list to description list internally Compliance:: + * Support 6-digit decimal char refs, 5-digit hexadecimal char refs (#1824) * Compatibility fixes for Opal * Check for number using Integer instead of Fixnum class for compatibility with Ruby 2.4 -Bug fixes:: +Bug Fixes:: * Use method_defined? instead of respond_to?
to check if method is already defined when patching (#1838) * Fix invalid conditional in HTML5 converter when handling of SVG * Processor#parse_content helper no longer shares attribute list between blocks (#1651) @@ -64,62 +962,69 @@ * Don't duplicate forward slash for path relative to root (#1822) Infrastructure:: + * Build gem properly in the absense of a git workspace, make compatible with JRuby (#1779) - * Run tests in CI using latest versions of Ruby, including Ruby 2.3 (@ferdinandrosario) + * Run tests in CI using latest versions of Ruby, including Ruby 2.3 (*@ferdinandrosario*) Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.5[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.5[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v1.5.4...v1.5.5[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v1.5.4\...v1.5.5[full diff] == 1.5.4 (2016-01-03) - @mojavelinux Enhancements:: - * translate README into French (@anthonny, @mogztter, @gscheibel, @mgreau) (#1630) + + * translate README into French (#1630) (*@anthonny*, *@mogztter*, *@gscheibel*, *@mgreau*) * allow linkstyle in manpage output to be configured (#1610) Improvements:: + * upgrade to MathJax 2.6.0 and disable loading messages * upgrade to Font Awesome 4.5.0 * disable toc if document has no sections (#1633) * convert inline asciimath to MathML (using asciimath gem) in DocBook converter (#1622) - * add attribute to control build reproducibility (@bk2204) (#1453) + * add attribute to control build reproducibility (#1453) (*@bk2204*) * recognize \file:/// as a file root in Opal browser env (#1561) - * honor icon attribute on admonition block when font-based icons are enabled (@robertpanzer) (#1593) + * honor icon attribute on admonition block when font-based icons are enabled (#1593) (*@robertpanzer*) * resolve custom icon relative to iconsdir; add file extension if absent (#1634) * allow asciidoctor cli to resolve library path when invoked without leading ./ Compliance:: + * allow special section to be nested at any depth (#1591) * ensure colpcwidth values add up to 100%; increase precision of values to 4 decimal places (#1647) * ignore blank cols attribute on table (#1647) * support shorthand syntax for block attributes on document title (#1650) -Bug fixes:: +Bug Fixes:: + * don't include default toc in AsciiDoc table cell; don't pass toc location attributes to nested document (#1582) * guard against nil dlist list item in find_by (#1618) * don't swallow trailing line when include file is not readable (#1602) * change xlink namespace to xl in DocBook 5 output to prevent parse error (#1597) * make callouts globally unique within document, including AsciiDoc table cells (#1626) - * initialize Slim-related attributes regardless of when Slim was loaded (@terceiro) (#1576) - * differentiate literal backslash from escape sequence in manpage output 
(@ds26gte) (#1604) - * don't mistake line beginning with \. for troff macro in manpage output (@ds26gte) (#1589) - * escape leading dots so user content doesn't trigger troff macros in manpage output (@ds26gte) (#1631) - * use \c after .URL macro to remove extraneous space in manpage output (@ds26gte) (#1590) + * initialize Slim-related attributes regardless of when Slim was loaded (#1576) (*@terceiro*) + * differentiate literal backslash from escape sequence in manpage output (#1604) (*@ds26gte*) + * don't mistake line beginning with \. for troff macro in manpage output (#1589) (*@ds26gte*) + * escape leading dots so user content doesn't trigger troff macros in manpage output (#1631) (*@ds26gte*) + * use \c after .URL macro to remove extraneous space in manpage output (#1590) (*@ds26gte*) * fix missing endline after .URL macro in manpage output (#1613) - * properly handle spacing around .URL/.MTO macro in manpage output (@ds26gte) (#1641) + * properly handle spacing around .URL/.MTO macro in manpage output (#1641) (*@ds26gte*) * don't swallow doctitle attribute followed by block title (#1587) * change strategy for splitting names of author; fixes bug in Opal/Asciidoctor.js * don't fail if library is loaded more than once Infrastructure:: - * remove trailing endlines in project source code + + * remove trailing newlines in project source code * update contributing guidelines * explicitly test ifeval scenario raised in issue #1585 * remove backreference substitution hack for Opal/Asciidoctor.js @@ -128,36 +1033,38 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.4[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.4[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v1.5.3...v1.5.4[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v1.5.3\...v1.5.4[full diff] == 1.5.3 (2015-10-31) - @mojavelinux Enhancements:: + * add support for interactive & inline SVGs (#1301, #1224) - * add built-in manpage backend (@davidgamba) (#651) - * create Mallard backend; asciidoctor/asciidoctor-mallard (@bk2204) (#425) - * add AsciiMath to MathML converter to support AsciiMath in DocBook converter (@pepijnve) (#954) + * add built-in manpage backend (#651) (*@davidgamba*) + * create Mallard backend; asciidoctor/asciidoctor-mallard (#425) (*@bk2204*) + * add AsciiMath to MathML converter to support AsciiMath in DocBook converter (#954) (*@pepijnve*) * allow text of selected lines to be highlighted in source block by Pygments or CodeRay (#1429) * use value of `docinfo` attribute to control docinfo behavior (#1510) - * add `docinfosubs` attribute to control which substitutions are performed on docinfo files (@mogztter) (#405) - * drop ability to specify multiple attributes with a single `-a` flag when using the CLI (@mogztter) (#405) - * make subtitle separator chars for document title configurable (@rmannibucau) (#1350) + * add `docinfosubs` attribute to control which substitutions are performed on docinfo files 
(#405) (*@mogztter*) + * drop ability to specify multiple attributes with a single `-a` flag when using the CLI (#405) (*@mogztter*) + * make subtitle separator chars for document title configurable (#1350) (*@rmannibucau*) * make XrefInlineRx regexp more permissive (Mathieu Boespflug) (#844) Improvements:: - * load JavaScript and CSS at bottom of HTML document (@mogztter) (#1238) - * list available backends in help text (@plaindocs) (#1271) + + * load JavaScript and CSS at bottom of HTML document (#1238) (*@mogztter*) + * list available backends in help text (#1271) (*@plaindocs*) * properly expand tabs in literal text (#1170, #841) - * add `source-indent` as document attribute (@mogztter) (#1169) + * add `source-indent` as document attribute (#1169) (*@mogztter*) * upgrade MathJax to 2.5.3 (#1329) - * upgrade Font Awesome to 4.4.0 (@mogztter) (#1465) + * upgrade Font Awesome to 4.4.0 (#1465) (*@mogztter*) * upgrade highlight.js to 8.6 (now 8.9.1) (#1390) * don't abort if syntax highlighter isn't available (#1253) * insert docinfo footer below footer div (#1503) @@ -166,10 +1073,10 @@ * restore attributes to header attributes after parse (#1255) * allow docdate and doctime to be overridden (#1495) * add CSS class `.center` for center block alignment (#1456) - * recognize U+2022 (bullet) as alternative marker for unordered lists (@mogztter) (#1177) + * recognize U+2022 (bullet) as alternative marker for unordered lists (#1177) (*@mogztter*) * allow videos to work for local files by prepending asset-uri-scheme (Chris) (#1320) * always assign playlist param when loop option is enabled for YouTube video - * parse isolated version in revision line (@bk2204) (#790) + * parse isolated version in revision line (#790) (*@bk2204*) * autoload Tilt when template converter is instantiated (#1313) * don't overwrite existing id entry in references table (#1256) * use outfilesuffix attribute defined in header when resolving outfile (#1412) @@ -180,20 +1087,21 @@ * load Droid Sans Mono 700 in default stylesheet * set line height of table cells used for syntax highlighting * set font-family of kbd; refine styling (#1423) - * extract condition into `quote_lines?` method (@mogztter) - * extract inline code into `read_paragraph` method (@mogztter) + * extract condition into `quote_lines?` method (*@mogztter*) + * extract inline code into `read_paragraph` method (*@mogztter*) * parent of block in ListItem should be ListItem (#1359) * add helper methods to List and ListItem (#1551) - * add method `AbstractNode#add_role` and `AbstractNode#remove_role` (@robertpanzer) (#1366) + * add method `AbstractNode#add_role` and `AbstractNode#remove_role` (#1366) (*@robertpanzer*) * introduce helper methods for sniffing URIs (#1422) * add helper to calculate basename without file extension - * document `-I` and `-r` options in the manual page (@bk2204) - * fix `+--help+` output text for `-I` (@bk2204) + * document `-I` and `-r` options in the manual page (*@bk2204*) + * fix `+--help+` output text for `-I` (*@bk2204*) * don't require open-uri-cached if already loaded * do not attempt to scan pattern of non-existent directory in template converter * prevent CodeRay from bolding every 10th line number Compliance:: + * use `` for footnote reference in text instead of `` (#1523) * fix alignment of wrapped text in footnote (#1524) * include full stop after footnote number in embeddable HTML @@ -201,9 +1109,10 @@ * resolve missing attribute in ifeval to empty string (#1387) * support unbreakable & breakable options on table 
(rockyallen) (#1140) -Bug fixes:: +Bug Fixes:: + * don't truncate exception stack in `Asciidoctor.load` (#1248) - * don't fail to save cause of Java exception (@robertpanzer) (#1458) + * don't fail to save cause of Java exception (#1458) (*@robertpanzer*) * fix precision error in timings report (#1342) * resolve regexp for inline macro lazily (#1336) * block argument to `find_by` should filter results (#1393) @@ -220,7 +1129,7 @@ * fix invalid color value in default CodeRay theme * built-in writer no longer fails if output is nil (#1544) * custom template engine options should take precedence - * fallback to require with a non-relative path to support Debian package (@mogztter) + * fallback to require with a non-relative path to support Debian package (*@mogztter*) * pass opts to recursive invocations of `PathResolver#system_path` * fix and test external links in docbook backend * use format symbol `:html` instead of `:html5` for Slim to fix warnings @@ -228,6 +1137,7 @@ * fix grammar in warning messages regarding thread_safe gem Infrastructure:: + * migrate opal_ext from core to Asciidoctor.js (#1517) * add Ruby 2.2 to CI build; only specify minor Ruby versions * enable containerized builds on Travis CI @@ -236,29 +1146,28 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.3[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.3[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v1.5.2...v1.5.3[full diff] -// end::compact[] +https://github.com/asciidoctor/asciidoctor/compare/v1.5.2\...v1.5.3[full diff] == 1.5.2 (2014-11-27) - @mojavelinux Enhancements:: - * add docinfo extension (@mogztter) (#1162) - * allow docinfo to be in separate directory from content, specified by `docinfodir` attribute (@mogztter) (#511) - * enable TeX equation auto-numbering if `eqnums` attribute is set (@jxxcarlson) (#1110) + * add docinfo extension (#1162) (*@mogztter*) + * allow docinfo to be in separate directory from content, specified by `docinfodir` attribute (#511) (*@mogztter*) + * enable TeX equation auto-numbering if `eqnums` attribute is set (#1110) (*@jxxcarlson*) Improvements:: * recognize `--` as valid line comment for callout numbers; make line comment configurable (#1068) * upgrade highlight.js to version 8.4 (#1216) - * upgrade Font Awesome to version 4.2.0 (@clojens) (#1201) + * upgrade Font Awesome to version 4.2.0 (#1201) (*@clojens*) * define JAVASCRIPT_PLATFORM constant to simplify conditional logic in the JavaScript environment (#897) * provide access to destination directory, outfile and outdir via Document object (#1203) * print encoding information in version report produced by `asciidoctor -v` (#1210) @@ -269,13 +1178,13 @@ * make start number for unique id generation configurable (#1148) * normalize and force UTF-8 encoding of docinfo content (#831) * allow subs and default_subs to be specified in Block constructor (#749) - * enhance error message when reading binary input files (@mogztter) (#1158) + 
* enhance error message when reading binary input files (#1158) (*@mogztter*) * add `append` method as alias to `<<` method on AbstractBlock (#1085) * assign value of `preface-title` as title of preface node (#1090) * fix spacing around checkbox in checklist (#1138) - * automatically load Slim's include plugin when using slim templates (@jirutka) (#1151) - * mixin Slim helpers into execution scope of slim templates (@jirutka) (#1143) - * improve DocBook output for manpage doctype (@bk2204) (#1134, #1142) + * automatically load Slim's include plugin when using slim templates (#1151) (*@jirutka*) + * mixin Slim helpers into execution scope of slim templates (#1143) (*@jirutka*) + * improve DocBook output for manpage doctype (#1134, #1142) (*@bk2204*) Compliance:: @@ -283,7 +1192,7 @@ * allow empty cell to appear at end of table row (#1106) * only produce one row for table in CSV or DSV format with a single cell (#1180) -Bug fixes:: +Bug Fixes:: * add explicit to_s call to generate delimiter settings for MathJax config (#1198) * fix includes that reference absolute Windows paths (#1144) @@ -291,18 +1200,18 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.2[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.2[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v1.5.1...v1.5.2[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v1.5.1\...v1.5.2[full diff] == 1.5.1 (2014-09-29) - @mojavelinux -Bug fixes:: +Bug Fixes:: * recognize tag directives inside comments within XML files for including tagged regions * restore passthroughs inside footnotes when more than one footnote appears on the same line @@ -322,14 +1231,14 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.1[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.1[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v1.5.0...v1.5.1[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v1.5.0\...v1.5.1[full diff] == 1.5.0 (2014-08-12) - @mojavelinux @@ -351,10 +1260,10 @@ * {star} switch to open source fonts (Open Sans, Noto Serif and Droid Sans Mono) in default stylesheet, major refinements to theme (#879) * {star} embed remote images when data-uri and allow-uri-read attributes are set (#612) * {star} support leveloffset on include directive and honor relative leveloffset values (#530) - * {star} switch default docbook backend to docbook5 
(@bk2204) (#554) + * {star} switch default docbook backend to docbook5 (#554) (*@bk2204*) * {star} added hide-uri-scheme attribute to hide uri scheme in automatic links (#800) * {star} allow substitutions to be incrementally added & removed (#522) - * {star} add compatibility with Opal, add shim compat library, use compatibility regexp, require libraries properly (@mogztter) (#679, #836, #846) + * {star} add compatibility with Opal, add shim compat library, use compatibility regexp, require libraries properly (#679, #836, #846) (*@mogztter*) * {star} output XHTML when backend is xhtml or xhtml5 (#494) * {star} add shorthand subs and specialchars as an alias for specialcharacters (#579) * {star} deprecate toc2 attribute in favor of position and placement values on toc attribute (e.g., toc=left) (#706) @@ -368,7 +1277,7 @@ * match implicit URLs that use the file scheme (#853) * added sectnumlevels to control depth of section numbering (#549) * add hardbreaks option to block (#630) - * sub attributes in manname (e.g., pass:[{docname}]) + * substitute attribute references in manname * warn on reference to missing attribute if attribute-missing is "warn" * only enable toc macro if toc is enabled and toc-placement attribute has the value macro (#706) * add sectnums attribute as alternative alias to numbered attribute (#684) @@ -377,9 +1286,9 @@ * {star} don't select lines that contain a tag directive when including tagged lines, make tag regexp more strict (#1027) * {star} use https scheme for assets by default - * {star} upgrade to Font Awesome 4.1 (@mogztter) (#752) - * {star} improve print styles, add print styles for book doctype (@leif81) (#997, #952) - * {star} add proper grid and frame styles for tables (@leif81) (#569) + * {star} upgrade to Font Awesome 4.1 (#752) (*@mogztter*) + * {star} improve print styles, add print styles for book doctype (#997, #952) (*@leif81*) + * {star} add proper grid and frame styles for tables (#569) (*@leif81*) * {star} use glyphs for checkboxes when not using font icons (#878) * {star} prefer source-language attribute over language attribute for defining default source language (#888) * {star} pass document as first argument to process method on Preprocessor @@ -392,15 +1301,15 @@ * constrain subscript & superscript markup (#564, #936) * match cell specs when cell separator is customized (#985) * use stylesheet to set default table width (#975) - * display nested elements correctly in toc (@kenfinnigan) (#967) - * add support for id attribute on links (@mogztter) (#935) - * add support for title attribute on links (@aslakknutsen) - * add -t flag to cli to control output of timing information (@mogztter) (#909) + * display nested elements correctly in toc (#967) (*@kenfinnigan*) + * add support for id attribute on links (#935) (*@mogztter*) + * add support for title attribute on links (*@aslakknutsen*) + * add -t flag to cli to control output of timing information (#909) (*@mogztter*) * rewrite converter API (#778) * rewrite extensions to support extension instances for AsciidoctorJ (#804) * integrate thread_safe gem (#638) * allow inline macro extensions that define a custom regexp to be matched (#792) - * make Reader#push_include work with default file, path and dir (@bk2204) (#743) + * make Reader#push_include work with default file, path and dir (#743) (*@bk2204*) * honor custom outfilesuffix and introduce relfileprefix (#801) * add author and copyright to meta in HTML5 backend (#838) * output attribution in front of citetitle for quote and verse blocks 
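Several 1.5.0 entries above concern the rewritten converter and extension APIs (extension instances, the registration DSL, custom inline macro regexps). As a rough, hedged sketch of what the block extension DSL looks like in practice, here is a hypothetical extension; the `shout` name and the sample text are invented for illustration, and the Asciidoctor extensions documentation, not this changelog, is the authoritative reference for the API.

[source,ruby]
----
require 'asciidoctor'
require 'asciidoctor/extensions'

# Hypothetical [shout] style that upcases the paragraph it is applied to.
class ShoutBlock < Asciidoctor::Extensions::BlockProcessor
  use_dsl
  named :shout
  on_context :paragraph

  def process parent, reader, attrs
    # create_paragraph builds a new paragraph block attached to the parent
    create_paragraph parent, reader.lines.map(&:upcase), attrs
  end
end

Asciidoctor::Extensions.register do
  block ShoutBlock
end

puts Asciidoctor.convert("[shout]\nasciidoctor is extensible", safe: :safe)
----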
@@ -409,10 +1318,10 @@ * print runtime environment in version output, support -v as version flag (#785) * unwrap preamble if standalone (#533) * drop leading & trailing blank lines in verbatim & raw content (#724) - * remove trailing endlines from source data (#727) + * remove trailing newlines from source data (#727) * add flag to cli to suppress warnings (#557) * emit warning if tag(s) not found in include file (#639) - * use element for vertical table headers instead of header class (@davidgamba) (#738) + * use element for vertical table headers instead of header class (#738) (*@davidgamba*) * share select references between AsciiDoc-style cell & main document (#729) * number chapters sequentially, always (#685) * add vbar attribute, make brvbar resolve properly (#643) @@ -420,20 +1329,20 @@ * enable sidebar toc for small screens (#628) * add square brackets around button in HTML output (#631) * make language hover text work for all languages in listing block - * set background color on toc2 to cover scrolling content (@neher) + * set background color on toc2 to cover scrolling content (*@neher*) * make document parsing a discrete step, make Reader accessible as property on Document * allow custom converter to set backend info such as outfilesuffix and htmlsyntax - * report an informative error message when a converter cannot be resolved (@mogztter) + * report an informative error message when a converter cannot be resolved (*@mogztter*) * add conum class to b element when icons are disabled, make conum CSS selector more specific - * expose Document object to extension point IncludeProcessor (@aslakknutsen) + * expose Document object to extension point IncludeProcessor (*@aslakknutsen*) * style audioblock title, simplify rules for block titles * alias :name_attributes to :positional_attributes in extension DSL - * upgrade to highlight.js 7.4 (and later 8.0) (@mogztter) (#756) + * upgrade to highlight.js 7.4 (and later 8.0) (#756) (*@mogztter*) Compliance:: * only include xmlns in docbook45 backend if xmlns attribute is specified (#929) - * add xmlns attribute for xhtml output (@bk2204) + * add xmlns attribute for xhtml output (*@bk2204*) * warn if table without a body is converted to DocBook (#961) * wrap around admonition inside example block in DocBook 4.5 (#931) * use if block image doesn't have a title (#927) @@ -442,20 +1351,20 @@ * add compliance setting to control use of shorthand property syntax (#789) * wrap top-level content inside preamble in DocBook backend when doctype is book (#971) * escape special chars in image alt text (#972) - * set starting number in ordered list for docbook (@megathaum) (#925) + * set starting number in ordered list for docbook (#925) (*@megathaum*) * match word characters in regular expressions as defined by Unicode (#892) * put source language class names on child code element of pre element (#921) * ignore case of attribute in conditional directives (#903) * allow attribute entry to reset / reseed counter (#870) * allow doctype to be set in AsciiDoc table cell (#863) - * match URL macro following entity (@jmbruel) (#819) + * match URL macro following entity (#819) (*@jmbruel*) * handle BOM when normalizing source (#824) * don't output revhistory if revdate is not set (#802) * perform normal subs on verse content (#799) * automatically wrap part intro content in partintro block, emit warning if part is invalid (#768) * force encoding of docinfo content to UTF-8 (#773) * add scaling & alignment attributes to block image in DocBook backend (#763) - 
* add support for pass:[anchor:[\]] macro (#531) + * add support for \anchor:[] macro (#531) * substitute anchor and xref macros in footnotes (#676) * remove all string mutation operations for compatibility with Opal (#735) * honor reftext defined in embedded section title anchor (#697) @@ -489,18 +1398,18 @@ * fixed broken passthroughs caused by source highlighting (#720) * copy custom stylesheet if linkcss is set (#300) * honor list continuations for indented, nested list items (#664) - * fix syntax errors in converters (@jljouannic) + * fix syntax errors in converters (*@jljouannic*) * fix iconfont-remote setting - * fix syntax error (target -> node.target) in Docbook 5 converter (@jf647) + * fix syntax error (target -> node.target) in Docbook 5 converter (*@jf647*) * output and style HTML for toc macro correctly Infrastructure:: * add Ruby 2.1 to list of supported platforms * reenable rbx in Travis build - * switch tests to minitest (@ktdreyer) - * update RPM for Fedora Rawhide (@ktdreyer) - * refactor unit tests so they work in RubyMine (@cmoulliard) + * switch tests to minitest (*@ktdreyer*) + * update RPM for Fedora Rawhide (*@ktdreyer*) + * refactor unit tests so they work in RubyMine (*@cmoulliard*) * add preliminary benchmark files to repository (#1021) * clean out old fixtures from test suite (#960) * add initial Cucumber test infrastructure (#731) @@ -508,18 +1417,18 @@ * build gemspec files using git ls-tree (#653) * use in-process web server for URI tests * update manpage to reflect updates in 1.5.0 - * rework README (@mogztter) (#651) + * rework README (#651) (*@mogztter*) Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=8&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.0[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v0.1.4...v1.5.0[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v0.1.4\...v1.5.0[full diff] == 0.1.4 (2013-09-05) - @mojavelinux @@ -554,11 +1463,11 @@ * ignore front matter used by static site generators if skip-front-matter attribute is set (#502) * sanitize contents of HTML title element in html5 backend (#504) * support toc position for toc2 (#467) - * cli accepts multiple files as input (@lordofthejars) (#227) + * cli accepts multiple files as input (#227) (*@lordofthejars*) * added Markdown-style horizontal rules and pass Markdown tests (#455) * added float clearing classes (.clearfix, .float-group) (#602) * don't disable syntax highlighting when explicit subs is used on listing block - * asciidoctor package now available in Debian Sid and Ubuntu Saucy (@avtobiff) (#216) + * asciidoctor package now available in Debian Sid and Ubuntu Saucy (#216) (*@avtobiff*) Compliance:: @@ -617,7 +1526,7 @@ * output multiple authors in HTML backend (#399) * allow multiple template directories to be specified, document in usage and manpage (#437) * added option to cli to specify template engine (#406) - * added support for external video hosting services in 
video block macro (@xcoulon) (#587) + * added support for external video hosting services in video block macro (#587) (*@xcoulon*) * strip leading separator(s) on section id if idprefix is blank (#551) * customized styling of toc placed inside body content (#507) * consolidate toc attribute so toc with or without toc-position can make sidebar toc (#618) @@ -646,14 +1555,14 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] - * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] - * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] + * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] + * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=7&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.4[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v0.1.3...v0.1.4[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v0.1.3\...v0.1.4[full diff] == 0.1.3 (2013-05-30) - @mojavelinux @@ -670,8 +1579,8 @@ * support Markdown-style quote blocks * added section level 5 (maps to h6 element in the html5 backend) (#334) * added btn inline macro (#259) - * added menu inline menu to identify a menu selection (@bleathem) (#173) - * added kbd inline macro to identify a key or key combination (@bleathem) (#172) + * added menu inline menu to identify a menu selection (#173) (*@bleathem*) + * added kbd inline macro to identify a key or key combination (#172) (*@bleathem*) * support alternative quote forms (#196) * added indent attribute to verbatim blocks (#365) * added prettify source-highlighter (#202) @@ -698,7 +1607,7 @@ * assign caption even if no title (#321) * horizontal dlist layout in docbook backend (#298) * set doctitle attribute (#337) - * allow any backend to be specified in cli (@lightguard) (#320) + * allow any backend to be specified in cli (#320) (*@lightguard*) * support for abstract and partintro (#297) Bug Fixes:: @@ -712,17 +1621,17 @@ Improvements:: * added tests for all special sections (#80) - * added test for attributes defined as string or string array (@lightguard) (#291) + * added test for attributes defined as string or string array (#291) (*@lightguard*) Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] -http://asciidoctor.org/news/2013/05/31/asciidoctor-0-1-3-released[release notes] | +https://asciidoctor.org/news/2013/05/31/asciidoctor-0-1-3-released[release notes] | https://github.com/asciidoctor/asciidoctor/issues?milestone=4&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.3[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v0.1.2...v0.1.3[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v0.1.2\...v0.1.3[full diff] == 0.1.2 (2013-04-25) - @mojavelinux @@ -732,7 +1641,7 @@ Enhancements:: - * new website at http://asciidoctor.org + * new website at https://asciidoctor.org * added a default stylesheet (#76) * added viewport meta tag for mobile browsers (#238) * set attributes based on safe mode (#244) @@ -772,12 +1681,12 @@ 
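The btn, kbd, and menu inline macros added in 0.1.3 (see the entries above) are only substituted when the experimental attribute is set. A small, illustrative Ruby sketch follows; the keyboard shortcut and menu path are made up for the example, and the exact HTML output differs across versions.

[source,ruby]
----
require 'asciidoctor'

# The UI macros require the experimental attribute to be enabled.
sample = <<~'ADOC'
  :experimental:

  Press kbd:[Ctrl+Shift+N], click btn:[OK], then choose menu:File[New].
ADOC

puts Asciidoctor.convert(sample, safe: :safe)
----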
Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] -http://asciidoctor.org/news/2013/04/25/asciidoctor-0-1-2-released[release notes] | +https://asciidoctor.org/news/2013/04/25/asciidoctor-0-1-2-released[release notes] | https://github.com/asciidoctor/asciidoctor/issues?milestone=3&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.2[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v0.1.1...v0.1.2[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v0.1.1\...v0.1.2[full diff] == 0.1.1 (2013-02-26) - @erebor @@ -828,11 +1737,11 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=1&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.1[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v0.1.0...v0.1.1[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v0.1.0\...v0.1.1[full diff] == 0.1.0 (2013-02-04) - @erebor @@ -855,7 +1764,7 @@ * added support for name=value@ attribute syntax passed via cli (#97) * attr refs no longer case sensitive (#109) * fixed several cases of incorrect list handling - * don't allow links to consume endlines or surrounding angled brackets + * don't allow links to consume newlines or surrounding angled brackets * recognize single quote in author name * support horizontal labeled list style * added support for the d cell style @@ -880,11 +1789,11 @@ Distribution Packages:: - * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] + * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=12&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.0[git tag] | -https://github.com/asciidoctor/asciidoctor/compare/v0.0.9...v0.1.0[full diff] +https://github.com/asciidoctor/asciidoctor/compare/v0.0.9\...v0.1.0[full diff] == Older releases (pre-0.0.1) diff -Nru asciidoctor-1.5.5/compat/asciidoc.conf asciidoctor-2.0.10/compat/asciidoc.conf --- asciidoctor-1.5.5/compat/asciidoc.conf 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/compat/asciidoc.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,395 +0,0 @@ -# This file is an AsciiDoc configuration file that makes -# AsciiDoc conform with Asciidoctor's fixes and customizations. -# -# Place this file in the same directory as your AsciiDoc document and the -# AsciiDoc processor (asciidoc) will automatically use it. 
- -[miscellaneous] -newline=\n - -[attributes] -# make html5 the default html backend -backend-alias-html=html5 -asterisk=* -backtick=` -brvbar=¦ -caret=^ -# plus introduced in AsciiDoc 8.6.9 -plus=+ -blank= -tilde=~ -cpp=C++ -user-home={eval:os.path.expanduser('~')} -vbar=| -# NOTE use -a no-inline-literal to set compat-mode to default when using AsciiDoc Python -ifndef::no-inline-literal[] -compat-mode=legacy -endif::[] - -[replacements] -# right single quote -(?[\S].*?)(?: +\1)?$ -sect1=^(==|##) +(?P[\S].*?)(?: +\1)?$ -sect2=^(===|###) +(?P<title>[\S].*?)(?: +\1)?$ -sect3=^(====|####) +(?P<title>[\S].*?)(?: +\1)?$ -sect4=^(=====|#####) +(?P<title>[\S].*?)(?: +\1)?$ -sect5=^(======|######) +(?P<title>[\S].*?)(?: +\1)?$ - -# Disable subs on pass block by default -[blockdef-pass] -subs=none - -# enables fenced code blocks -# FIXME I haven't sorted out yet how to do syntax highlighting -[blockdef-fenced-code] -delimiter=^```(?:\w+(?:,numbered)?)?$ -ifdef::language[] -style=source -template::[source-filter-style] -endif::language[] -ifndef::language[] -template=listingblock -subs=verbatim -posattrs=style -endif::language[] - -# enables blockquotes to be defined using two double quotes -[blockdef-air-quote] -template::[blockdef-quote] -delimiter=^""$ - -# markdown-style blockquote (paragraph only) -# FIXME does not strip leading > on subsequent lines -[paradef-markdown-quote] -delimiter=(?s)>\s*(?P<text>\S.*) -style=quote -quote-style=template="quoteparagraph",posattrs=("style","attribution","citetitle") - -# fix regex for callout list to require number; also makes markdown-style blockquote work -[listdef-callout] -posattrs=style -delimiter=^<?(?P<index>\d+>) +(?P<text>.+)$ -type=callout -tags=callout -style=arabic - -# enables literal block to be used as source block -[blockdef-literal] -template::[source-filter-style] - -# enables source block when source-highlighter is not defined -ifndef::source-highlighter[] -[source-filter-style] -source-style=template="listingblock",subs=("specialcharacters","callouts"),posattrs=("style","language","src_numbered","src_tab") - -[paradef-default] -template::[source-filter-style] - -[paradef-literal] -template::[source-filter-style] - -[blockdef-open] -template::[source-filter-style] - -[blockdef-listing] -template::[source-filter-style] -endif::source-highlighter[] - -[tabledef-csv] -template::[tabledef-default] -delimiter=^,={3,}$ -format=csv - -[tabledef-dsv] -template::[tabledef-default] -delimiter=^:={3,}$ -format=dsv - -[macros] -ifdef::no-inline-literal[] -(?su)\\?\+\+(?P<passtext>.*?)\+\+=pass[specialcharacters] -(?su)(?<![+\w])(\\?\+(?P<passtext>\S|\S.*?\S)\+)(?![+\w])=pass[specialcharacters] -endif::no-inline-literal[] - -# additional callout match behind line comments -#(?://|#|;;) ?\((?P<index>\d+)\)=callout -# additional callout match for XML -[\\]?<!--(?P<index>\d+)-->=callout - -# --- or *** or ___ or - - - or * * * or _ _ _ (in addition to the built-in ''') -^ {0,3}([-\*_])( *)\1\2\1$=#ruler - -# btn:[Save] -(?su)(?<!\w)\\?btn:\[(?P<attrlist>(?:\\\]|[^\]])+?)\]=button - -# kbd:[F11] or kbd:[Ctrl+T] or kbd:[Ctrl,T] -(?su)(?<!\w)\\?kbd:\[(?P<attrlist>(?:\\\]|[^\]])+?)\]=keyboard - -# menu:Search[] or menu:File[New...] or menu:View[Page Style, No Style] -# TODO implement menu:View[Page Style > No Style] syntax -(?su)(?<!\w)[\\]?(?P<name>menu):(?P<target>\w|\w.*?\S)?\[(?P<attrlist>.*?)\]= - -ifdef::basebackend-html[] - -[sect5] -<div class="sect5{style? {style}}{role? {role}}"> -<h6{id? 
id="{id}"}>{title}</h6> -| -</div> - -[button-inlinemacro] -<b class="button">{1}</b> - -[keyboard-inlinemacro] -{set2:keys:{eval:re.split(r'(?<!\+ |.\+)\+', '{1}')}} -{2%}{eval:len({keys}) == 1}<kbd>{1}</kbd> -{2%}{eval:len({keys}) == 2}<kbd class="combo"><kbd>{eval:{keys}[0].strip()}</kbd>+<kbd>{eval:{keys}[1].strip()}</kbd></kbd> -{2%}{eval:len({keys}) == 3}<kbd class="combo"><kbd>{eval:{keys}[0].strip()}</kbd>+<kbd>{eval:{keys}[1].strip()}</kbd>+<kbd>{eval:{keys}[2].strip()}</kbd></kbd> -{2#}{3%}<kbd class="combo"><kbd>{1}</kbd>+<kbd>{2}</kbd></kbd> -{3#}<kbd class="combo"><kbd>{1}</kbd>+<kbd>{2}</kbd>+<kbd>{3}</kbd></kbd> - -[menu-inlinemacro] -{1%}<span class="menu">{target}</span> -{1#}{2%}<span class="menuseq"><span class="menu">{target}</span> ▸ <span class="menuitem">{1}</span></span> -{2#}{3%}<span class="menuseq"><span class="menu">{target}</span> ▸ <span class="submenu">{1}</span> ▸ <span class="menuitem">{2}</span></span> -{3#}<span class="menuseq"><span class="menu">{target}</span> ▸ <span class="submenu">{1}</span> ▸ <span class="submenu">{2}</span> ▸ <span class="menuitem">{3}</span></span> - -[literal-inlinemacro] -<code>{passtext}</code> - -[tags] -emphasis=<em{1? class="{1}"}>|</em> -strong=<strong{1? class="{1}"}>|</strong> -monospaced=<code{1? class="{1}"}>|</code> -superscript=<sup{1? class="{1}"}>|</sup> -subscript=<sub{1? class="{1}"}>|</sub> -mark={1=<mark>}{1?<span class="{1}">}|{1?</span>}{1=</mark>} - -[monospacedwords] -<code>{words}</code> - -ifdef::linkattrs[] -[http-inlinemacro] -<a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> -[https-inlinemacro] -<a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> -[ftp-inlinemacro] -<a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> -[file-inlinemacro] -<a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> -[irc-inlinemacro] -<a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> -[mailto-inlinemacro] -<a href="mailto:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={target}}</a> -[link-inlinemacro] -<a href="{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={target}}</a> -endif::linkattrs[] - -[listtags-numbered] -list=<div class="olist{style? {style}}{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol class="{style}"{style@loweralpha: type="a"}{style@lowerroman: type="i"}{style@upperalpha: type="A"}{style@upperroman: type="I"}{start? start="{start}"}>|</ol></div> - -[tabletags-monospaced] -paragraph=<p class="tableblock"><code>|</code></p> - -[sect0] -<h1{id? id="{id}"} class="sect0">{title}</h1> -| - -# support for document title in embedded documents -ifeval::[not config.header_footer] -[preamble] -<h1>{title={doctitle}}</h1>{set:title-rendered:} -<div id="preamble"> -<div class="sectionbody"> -| -</div> -{toc,toc2#}{toc-placement$preamble:}{template:toc} -</div> - -[sect1] -{title-rendered%}<h1>{doctitle}</h1> -<div class="sect1{style? {style}}{role? {role}}"> -<h2{id? 
id="{id}"}>{numbered?{sectnum} }{title}</h2> -<div class="sectionbody"> -| -</div> -</div> -endif::[] - -# override to add the admonition name to the class attribute of the outer element -[admonitionblock] -<div class="admonitionblock {name}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}> -<table><tr> -<td class="icon"> -{data-uri%}{icons#}<img src="{icon={iconsdir}/{name}.png}" alt="{caption}"> -{data-uri#}{icons#}<img alt="{caption}" src="data:image/png;base64, -{data-uri#}{icons#}{sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/{name}.png}")}"}"> -{icons%}<div class="title">{caption}</div> -</td> -<td class="content"> -<div class="title">{title}</div> -| -</td> -</tr></table> -</div> - -# modified so that: -# a. imagesdir is only prepended if target is not a uri or absolute path (relative path only) -# b. automatic alt text is calculated from basename of target without extension -# note that the escaped_target attribute must be set in order to use a uri in the conditional attribute reference -[image-inlinemacro] -<span class="image{role? {role}}"{float? style="float: {float}"}>{set2:escaped_target:{eval:'{target}'.replace(':','\:')}} -<a class="image" href="{link}"> -{data-uri%}<img src="{target@^(/|https?\://).*:{escaped_target}:{imagesdir?{imagesdir}}{imagesdir?/}{escaped_target}}" alt="{alt={eval:os.path.splitext(os.path.basename('{target}'))[0]}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"}> -{data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"} -{data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}"> -{link#}</a> -</span> - -# modified so that: -# a. imagesdir is only prepended if target is not a uri or absolute path (relative path only) -# b. automatic alt text is calculated from basename of target without extension -# note that the escaped_target attribute must be set in order to use a uri in the conditional attribute reference -[image-blockmacro] -<div class="imageblock{style? {style}}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}{align? style="text-align:{align};"}{float? style="float: {float}"}> -<div class="content">{set2:escaped_target:{eval:'{target}'.replace(':','\:')}} -<a class="image" href="{link}"> -{data-uri%}<img src="{target@^(/|https?\://).*:{escaped_target}:{imagesdir?{imagesdir}}{imagesdir?/}{escaped_target}}" alt="{alt={eval:os.path.splitext(os.path.basename('{target}'))[0]}}"{width? width="{width}"}{height? height="{height}"}> -{data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"} -{data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}"> -{link#}</a> -</div> -<div class="title">{caption={figure-caption} {counter:figure-number}. 
}{title}</div> -</div> - -# a common template for emitting the attribute for a quote or verse block -# don't output attribution div if attribution or citetitle are both empty -[attribution] -{attribution,citetitle#}<div class="attribution"> -— {attribution}{citetitle?<br>} -<cite>{citetitle}</cite> -{attribution,citetitle#}</div> - -# override to use blockquote element for content and cite element for cite title -[quoteblock] -<div class="quoteblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}> -<div class="title">{title}</div> -<blockquote> -| -</blockquote> -template::[attribution] -</div> - -# override to use cite element for cite title -[verseblock] -<div class="verseblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}> -<div class="title">{title}</div> -<pre class="content"> -| -</pre> -template::[attribution] -</div> - -# override tabletags to support cellbgcolor -[tabletags-default] -headdata=<th class="tableblock halign-{halign=left} valign-{valign=top}"{colspan@1:: colspan="{colspan}"}{rowspan@1:: rowspan="{rowspan}"}{cellbgcolor? style="background-color:{cellbgcolor};"}>|</th> -bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}"{colspan@1:: colspan="{colspan}"}{rowspan@1:: rowspan="{rowspan}"}{cellbgcolor? style="background-color:{cellbgcolor};"}>|</td> - -# override header cells to use th -[tabletags-header] -bodydata=<th class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }>|</th> -paragraph=<p class="tableblock">|</p> - -[toc] -<div id="toc"> -<div id="toctitle">{toc-title}</div> -ifdef::toc2[] -<script type="text/javascript"> -document.body.className += ' toc2'; -document.getElementById('toc').className = 'toc2'; -</script> -endif::toc2[] -<noscript><p><b>JavaScript must be enabled in your browser to display the table of contents.</b></p></noscript> -</div> - -endif::basebackend-html[] - -# Override docinfo to support subtitle -ifdef::basebackend-docbook[] - -[sect5] -<section{id? id="{id}"}{role? role="{role}"}{reftext? xreflabel="{reftext}"}> -<title>{title} -| - - -[tags] -monospaced=| -subscript=| -superscript=| - -[button-inlinemacro] -{1} - -[keyboard-inlinemacro] -{set2:keys:{eval:re.split(r'(?{1} -{2%}{eval:len({keys}) == 2}{eval:{keys}[0].strip()}{eval:{keys}[1].strip()} -{2%}{eval:len({keys}) == 3}{eval:{keys}[0].strip()}{eval:{keys}[1].strip()}{eval:{keys}[2].strip()} -{2#}{3%}{1}{2} -{3#}{1}{2}{3} - -[menu-inlinemacro] -{1%}{target} -{1#}{2%}{target} {1} -{2#}{3%}{target} {1} {2} -{3#}{target} {1} {2} {3} - -# override tabletags to support cellbgcolor -[tabletags-default] -headdata={cellbgcolor?}| -bodydata={cellbgcolor?}| - -[docinfo] -ifndef::notitle[] -{set2:subtitle_offset:{eval:'{doctitle}'.rfind(': ')}} -{eval:{subtitle_offset} != -1}{eval:'{doctitle}'[0:{subtitle_offset}]} -{eval:{subtitle_offset} != -1}{eval:'{doctitle}'[{subtitle_offset} + 2:]} -{eval:{subtitle_offset} < 0}{doctitle} -endif::notitle[] -{revdate} -# To ensure valid articleinfo/bookinfo when there is no AsciiDoc header. 
-{doctitle%}{revdate%}{docdate} -{authored#} -{firstname} -{middlename} -{lastname} -{email} -{authored#} -{authorinitials} -{revnumber?{revnumber}}{revdate}{authorinitials?{authorinitials}}{revremark?{revremark}} -{docinfo1,docinfo2#}{include:{docdir}/docinfo.xml} -{docinfo,docinfo2#}{include:{docdir}/{docname}-docinfo.xml} -{orgname} - -endif::basebackend-docbook[] diff -Nru asciidoctor-1.5.5/compat/font-awesome-3-compat.css asciidoctor-2.0.10/compat/font-awesome-3-compat.css --- asciidoctor-1.5.5/compat/font-awesome-3-compat.css 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/compat/font-awesome-3-compat.css 1970-01-01 00:00:00.000000000 +0000 @@ -1,397 +0,0 @@ -.fa-envelope-alt:before { - content: "\f003"; -} -.fa-star-empty:before { - content: "\f006"; -} -.fa-ok:before { - content: "\f00c"; -} -.fa-remove:before { - content: "\f00d"; -} -.fa-zoom-in:before { - content: "\f00e"; -} -.fa-zoom-out:before { - content: "\f010"; -} -.fa-off:before { - content: "\f011"; -} -.fa-trash:before { - content: "\f014"; -} -.fa-file-alt:before { - content: "\f016"; -} -.fa-time:before { - content: "\f017"; -} -.fa-download-alt:before { - content: "\f019"; -} -.fa-download-alt:before { - content: "\f01a"; -} -.fa-upload-alt:before { - content: "\f01b"; -} -.fa-play-sign:before { - content: "\f01d"; -} -.fa-indent-right-left:before { - content: "\f03b"; -} -.fa-indent-right:before { - content: "\f03c"; -} -.fa-facetime-video:before { - content: "\f03d"; -} -.fa-picture:before { - content: "\f03e"; -} -.fa-edit-sign-o:before { - content: "\f044"; -} -.fa-share-alt-square-o:before { - content: "\f045"; -} -.fa-ok-sign-o:before { - content: "\f046"; -} -.fa-move:before { - content: "\f047"; -} -.fa-plus-sign:before { - content: "\f055"; -} -.fa-minus-sign:before { - content: "\f056"; -} -.fa-remove-sign:before { - content: "\f057"; -} -.fa-ok-sign:before { - content: "\f058"; -} -.fa-question-sign:before { - content: "\f059"; -} -.fa-info-sign:before { - content: "\f05a"; -} -.fa-screenshot:before { - content: "\f05b"; -} -.fa-remove-circle:before { - content: "\f05c"; -} -.fa-ok-circle:before { - content: "\f05d"; -} -.fa-ban-circle:before { - content: "\f05e"; -} -.fa-share-alt:before { - content: "\f064"; -} -.fa-resize-full:before { - content: "\f065"; -} -.fa-resize-small:before { - content: "\f066"; -} -.fa-exclamation-sign:before { - content: "\f06a"; -} -.fa-eye-open:before { - content: "\f06e"; -} -.fa-eye-open-close:before { - content: "\f070"; -} -.fa-warning-sign:before { - content: "\f071"; -} -.fa-folder-close:before { - content: "\f07b"; -} -.fa-folder-close-close-altpen:before { - content: "\f07c"; -} -.fa-move-v:before { - content: "\f07d"; -} -.fa-move-h:before { - content: "\f07e"; -} -.fa-bar-chart:before { - content: "\f080"; -} -.fa-twitter-sign:before { - content: "\f081"; -} -.fa-facebook-sign:before { - content: "\f082"; -} -.fa-thumbs-up-alt:before { - content: "\f087"; -} -.fa-thumbs-down-alt:before { - content: "\f088"; -} -.fa-heart-empty:before { - content: "\f08a"; -} -.fa-signout:before { - content: "\f08b"; -} -.fa-linkedin-sign:before { - content: "\f08c"; -} -.fa-pushpin:before { - content: "\f08d"; -} -.fa-signin:before { - content: "\f090"; -} -.fa-github-sign:before { - content: "\f092"; -} -.fa-upload-alt:before { - content: "\f093"; -} -.fa-lemon:before { - content: "\f094"; -} -.fa-ok-empty:before { - content: "\f096"; -} -.fa-bookmark-empty:before { - content: "\f097"; -} -.fa-phone-sign:before { - content: "\f098"; -} -.fa-hdd:before { - content: 
"\f0a0"; -} -.fa-bell-alt:before { - content: "\f0f3"; -} -.fa-hand-right:before { - content: "\f0a4"; -} -.fa-hand-left:before { - content: "\f0a5"; -} -.fa-hand-up:before { - content: "\f0a6"; -} -.fa-hand-down:before { - content: "\f0a7"; -} -.fa-circle-arrow-left:before { - content: "\f0a8"; -} -.fa-circle-arrow-right:before { - content: "\f0a9"; -} -.fa-circle-arrow-up:before { - content: "\f0aa"; -} -.fa-circle-arrow-down:before { - content: "\f0ab"; -} -.fa-fullscreen:before { - content: "\f0b2"; -} -.fa-group:before { - content: "\f0c0"; -} -.fa-beaker:before { - content: "\f0c3"; -} -.fa-paper-clip:before { - content: "\f0c6"; -} -.fa-sign-blank:before { - content: "\f0c8"; -} -.fa-pinterest-sign:before { - content: "\f0d3"; -} -.fa-google-plus-sign:before { - content: "\f0d4"; -} -.fa-comment-alt:before { - content: "\f0e5"; -} -.fa-comments-alt:before { - content: "\f0e6"; -} -.fa-lightbulb:before { - content: "\f0eb"; -} -.fa-bell-alt:before { - content: "\f0a2"; -} -.fa-food:before { - content: "\f0f5"; -} -.fa-file-text-alt:before { - content: "\f0f6"; -} -.fa-building:before { - content: "\f0f7"; -} -.fa-hospital:before { - content: "\f0f8"; -} -.fa-h-sign:before { - content: "\f0fd"; -} -.fa-plus-sign-alt:before { - content: "\f0fe"; -} -.fa-double-angle-left:before { - content: "\f100"; -} -.fa-double-angle-right:before { - content: "\f101"; -} -.fa-double-angle-up:before { - content: "\f102"; -} -.fa-double-angle-down:before { - content: "\f103"; -} -.fa-circle-blank:before { - content: "\f10c"; -} -.fa-folder-close-close-alt:before { - content: "\f114"; -} -.fa-folder-close-close-altpen-o:before { - content: "\f115"; -} -.fa-smile:before { - content: "\f118"; -} -.fa-frown:before { - content: "\f119"; -} -.fa-meh:before { - content: "\f11a"; -} -.fa-keyboard:before { - content: "\f11c"; -} -.fa-flag-alt:before { - content: "\f11d"; -} -.fa-microphone-off:before { - content: "\f131"; -} -.fa-calendar-empty:before { - content: "\f133"; -} -.fa-chevron-sign-left:before { - content: "\f137"; -} -.fa-chevron-sign-right:before { - content: "\f138"; -} -.fa-chevron-sign-up:before { - content: "\f139"; -} -.fa-chevron-sign-down:before { - content: "\f13a"; -} -.fa-ellipsis-horizontal:before { - content: "\f141"; -} -.fa-ellipsis-vertical:before { - content: "\f142"; -} -.fa-rss-sign:before { - content: "\f143"; -} -.fa-play-sign:before { - content: "\f144"; -} -.fa-minus-sign-alt:before { - content: "\f146"; -} -.fa-ok-minus:before { - content: "\f147"; -} -.fa-ok-sign:before { - content: "\f14a"; -} -.fa-edit-sign:before { - content: "\f14b"; -} -.fa-external-link-sign:before { - content: "\f14c"; -} -.fa-share-alt-square:before { - content: "\f14d"; -} -.fa-collapse:before { - content: "\f150"; -} -.fa-collapse-top:before { - content: "\f151"; -} -.fa-resize-full:before { - content: "\f152"; -} -.fa-cnyle:before, -.fa-cny:before { - content: "\f158"; -} -.fa-sort-by-alphabet:before { - content: "\f15d"; -} -.fa-sort-by-alphabet-alt:before { - content: "\f15e"; -} -.fa-sort-by-attributes:before { - content: "\f160"; -} -.fa-sort-by-attributes-alt:before { - content: "\f161"; -} -.fa-sort-by-order:before { - content: "\f162"; -} -.fa-sort-by-order-alt:before { - content: "\f163"; -} -.fa-youtube-sign:before { - content: "\f166"; -} -.fa-xing-sign:before { - content: "\f169"; -} -.fa-stackexchange:before { - content: "\f16c"; -} -.fa-bitbucket-sign:before { - content: "\f172"; -} -.fa-tumblr-sign:before { - content: "\f174"; -} -.fa-sun:before { - content: "\f185"; -} 
-.fa-moon:before { - content: "\f186"; -} -.fa-expand-alt:before { - content: "\f196"; -} diff -Nru asciidoctor-1.5.5/CONTRIBUTING.adoc asciidoctor-2.0.10/CONTRIBUTING.adoc --- asciidoctor-1.5.5/CONTRIBUTING.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/CONTRIBUTING.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -13,10 +13,12 @@ :uri-branch-help: {uri-fork-help}#create-branches :uri-pr-help: {uri-help-base}/using-pull-requests :uri-gist: https://gist.github.com +:uri-yard: https://yardoc.org +:uri-tomdoc: http://tomdoc.org == License Agreement -By contributing changes to this repository, you agree to license your contributions under the <>. +By contributing changes to this repository, you agree to license your contributions under the MIT license. This ensures your contributions have the same license as the project and that the community is free to use your contributions. You also assert that you are the original author of the work that you are contributing unless otherwise stated. @@ -33,8 +35,8 @@ == Submitting a Pull Request . {uri-fork-help}[Fork the repository]. -. Run `bundle` to install development dependencies. - * If the `bundle` command is not available, run `gem install bundler` to install it. +. Run `NOKOGIRI_USE_SYSTEM_LIBRARIES=1 bundle` to install development dependencies. + - If the `bundle` command is not available, run `gem install bundler` to install it. . {uri-branch-help}[Create a topic branch] (preferably using the pattern `issue-XYZ`, where `XYZ` is the issue number). . Add tests for your unimplemented feature or bug fix. (See <>) . Run `bundle exec rake` to run the tests. @@ -163,6 +165,18 @@ $ asciidoctor-dev README.adoc +== Building the API Documentation + +The API documentation is written in the {uri-tomdoc}[TomDoc] dialect and built using {uri-yard}[Yard]. + +The options for Yard are configured in the [.path]_.yardopts_ file at the root of the project. + +To build the API documentation locally, run the following command: + + $ bundle exec yard + +The documentation will be built into the [.path]_rdoc_ folder. + == Supporting Additional Ruby Versions If you would like this library to support another Ruby version, you may volunteer to be a maintainer. diff -Nru asciidoctor-1.5.5/data/locale/attributes.adoc asciidoctor-2.0.10/data/locale/attributes.adoc --- asciidoctor-1.5.5/data/locale/attributes.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,470 +1,11 @@ -// This file provides translations for all built-in attributes in Asciidoctor that output localized content. -// See http://asciidoctor.org/docs/user-manual/#customizing-built-in-labels to learn how to use it. +// This directory provides translations for all built-in attributes in Asciidoctor that emit translatable strings. +// See http://asciidoctor.org/docs/user-manual/#customizing-labels to learn how to apply this file. +// -// NOTE: Please use a line comment in front of the listing-caption and preface-title entries. -// These attributes are optional and not set by default. +// If you're introducing a new translation, create a file named attributes-<lang>.adoc, where <lang> is the IANA subtag for the language. +// Next, assign a translation for each attribute, using attributes-en.adoc as a reference. +// -// IMPORTANT: Do not add any blank lines. +// IMPORTANT: Do not include any blank lines in the translation file.
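The replacement comments just above describe the new locale layout: the old monolithic file gives way to one attributes-<lang>.adoc per language, pulled in by the single conditional include that now ends attributes.adoc (`ifdef::lang[include::attributes-{lang}.adoc[]]`, shown a little further down in this hunk). A minimal usage sketch, assuming a document that sits next to the gem's data/ directory; the title and paths are illustrative, and the bare `:listing-caption:` line is a deliberate opt-in:

 = Ein Beispieldokument
 :lang: de
 :listing-caption:
 include::data/locale/attributes.adoc[]

With `:lang:` set before the include, the conditional resolves to attributes-de.adoc, so figures are captioned "Abbildung", notes are labeled "Anmerkung", and the TOC title becomes "Inhaltsverzeichnis" (values taken from the German file later in this diff). Because listing-caption is optional and unset by default, the per-language files wrap it in `ifdef::listing-caption[]`, so the translated caption only applies when the author sets the attribute first. The IMPORTANT line about blank lines exists because these files are meant to be included from a document header, where a blank line would end the header.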
// -// Arabic translation, courtesy of Aboullaite Mohammed -ifeval::["{lang}" == "ar"] -:appendix-caption: ملحق -:caution-caption: تنبيه -:example-caption: مثال -:figure-caption: الشكل -:important-caption: مهم -:last-update-label: اخر تحديث -//:listing-caption: قائمة -:manname-title: اسم -:note-caption: ملاحظة -//:preface-title: تمهيد -:table-caption: جدول -:tip-caption: تلميح -:toc-title: فهرس -:untitled-label: بدون عنوان -:version-label: نسخة -:warning-caption: تحذير -endif::[] -// -// Bulgarian translation, courtesy of Ivan St. Ivanov -ifeval::["{lang}" == "bg"] -:appendix-caption: Приложение -:caution-caption: Внимание -:example-caption: Пример -:figure-caption: Фигура -:important-caption: Важно -:last-update-label: Последно обновен -//:listing-caption: Листинг -:manname-title: ИМЕ -:note-caption: Забележка -//:preface-title: Предговор -:table-caption: Таблица -:tip-caption: Подсказка -:toc-title: Съдържание -:untitled-label: Без заглавие -:version-label: Версия -:warning-caption: Внимание -endif::[] -// -// Catalan translation, courtesy of Abel Salgado Romero and Alex Soto -ifeval::["{lang}" == "ca"] -:appendix-caption: Apendix -:caution-caption: Atenció -:example-caption: Exemple -:figure-caption: Figura -:important-caption: Important -:last-update-label: Última actualització -//:listing-caption: Llista -:manname-title: NOM -:note-caption: Nota -//:preface-title: Prefaci -:table-caption: Taula -:tip-caption: Suggeriment -:toc-title: Índex -:untitled-label: Sense títol -:version-label: Versió -:warning-caption: Advertència -endif::[] -// -// Danish translation, courtesy of Max Rydahl Andersen -ifeval::["{lang}" == "da"] -:appendix-caption: Appendix -:caution-caption: Forsigtig -:example-caption: Eksempel -:figure-caption: Figur -:important-caption: Vigtig -:last-update-label: Sidst opdateret -:listing-caption: List -:manname-title: NAVN -:note-caption: Notat -//:preface-title: -:table-caption: Tabel -:tip-caption: Tips -:toc-title: Indholdsfortegnelse -:untitled-label: Unavngivet -:version-label: Version -:warning-caption: Advarsel -endif::[] -// -// German translation, courtesy of Florian Wilhelm -ifeval::["{lang}" == "de"] -:appendix-caption: Anhang -:caution-caption: Achtung -:example-caption: Beispiel -:figure-caption: Abbildung -:important-caption: Wichtig -:last-update-label: Zuletzt aktualisiert -//:listing-caption: Listing -:manname-title: BEZEICHNUNG -:note-caption: Anmerkung -//:preface-title: Vorwort -:table-caption: Tabelle -:tip-caption: Hinweis -:toc-title: Inhalt -:untitled-label: Ohne Titel -:version-label: Version -:warning-caption: Warnung -endif::[] -// -// Spanish translation, courtesy of Eddú Meléndez -ifeval::["{lang}" == "es"] -:appendix-caption: Apéndice -:caution-caption: Precaución -:example-caption: Ejemplo -:figure-caption: Figura -:important-caption: Importante -:last-update-label: Ultima actualización -//:listing-caption: Lista -:manname-title: NOMBRE -:note-caption: Nota -//:preface-title: Prefacio -:table-caption: Tabla -:tip-caption: Sugerencia -:toc-title: Tabla de Contenido -:untitled-label: Sin título -:version-label: Versión -:warning-caption: Aviso -endif::[] -// -// Persian (Farsi) translation, courtesy of Shahryar Eivazzadeh -ifeval::["{lang}" == "fa"] -:appendix-caption: پیوست -:caution-caption: گوشزد -:example-caption: نمونه -:figure-caption: نمودار -:important-caption: مهم -:last-update-label: آخرین به روز رسانی -//:listing-caption: فهرست -:manname-title: نام -:note-caption: یادداشت -//:preface-title: پیشگفتار -:table-caption: جدول 
-:tip-caption: نکته -:toc-title: فهرست مطالب -:untitled-label: بی‌نام -:version-label: نگارش -:warning-caption: هشدار -endif::[] -// -// Finnish translation by Tero Hänninen -ifeval::["{lang}" == "fi"] -:appendix-caption: Liitteet -:caution-caption: Huom -:example-caption: Esimerkki -:figure-caption: Kuvio -:important-caption: Tärkeää -:last-update-label: Viimeksi päivitetty -//:listing-caption: Listaus -:manname-title: NIMI -:note-caption: Huomio -//:preface-title: Esipuhe -:table-caption: Taulukko -:tip-caption: Vinkki -:toc-title: Sisällysluettelo -:untitled-label: Nimetön -:version-label: Versio -:warning-caption: Varoitus -endif::[] -// -// French translation, courtesy of Nicolas Comet -ifeval::["{lang}" == "fr"] -:appendix-caption: Appendice -:caution-caption: Avertissement -:example-caption: Exemple -:figure-caption: Figure -:important-caption: Important -:last-update-label: Dernière mise à jour -//:listing-caption: Liste -:manname-title: NOM -:note-caption: Note -//:preface-title: Préface -:table-caption: Tableau -:tip-caption: Astuce -:toc-title: Table des matières -:untitled-label: Sans titre -:version-label: Version -:warning-caption: Attention -endif::[] -// -// Hungarian translation, courtesy of István Pató -ifeval::["{lang}" == "hu"] -:appendix-caption: függelék -:caution-caption: Figyelmeztetés -:example-caption: Példa -:figure-caption: Ábra -:important-caption: Fontos -:last-update-label: Utolsó frissítés -//:listing-caption: Lista -:manname-title: NÉV -:note-caption: Megjegyzés -//:preface-title: Előszó -:table-caption: Táblázat -:tip-caption: Tipp -:toc-title: Tartalomjegyzék -:untitled-label: Névtelen -:version-label: Verzió -:warning-caption: Figyelem -endif::[] -// -// Italian translation, courtesy of Marco Ciampa -ifeval::["{lang}" == "it"] -:appendix-caption: Appendice -:caution-caption: Attenzione -:chapter-label: Capitolo -:example-caption: Esempio -:figure-caption: Figura -:important-caption: Importante -:last-update-label: Ultimo aggiornamento -//:listing-caption: Elenco -:manname-title: NOME -:note-caption: Nota -//:preface-title: Prefazione -:table-caption: Tabella -:tip-caption: Suggerimento -:toc-title: Indice -:untitled-label: Senza titolo -:version-label: Versione -:warning-caption: Attenzione -endif::[] -// -// Japanese translation, courtesy of Takayuki Konishi -ifeval::["{lang}" == "ja"] -:appendix-caption: 付録 -:caution-caption: 注意 -:example-caption: 例 -:figure-caption: 図 -:important-caption: 重要 -:last-update-label: 最終更新 -//:listing-caption: リスト -:manname-title: 名前 -:note-caption: 注記 -//:preface-title: まえがき -:table-caption: 表 -:tip-caption: ヒント -:toc-title: 目次 -:untitled-label: 無題 -:version-label: バージョン -:warning-caption: 警告 -endif::[] -// -// Korean translation, courtesy of Sungsik Nam -ifeval::["{lang}" == "kr"] -:appendix-caption: 부록 -:caution-caption: 주의 -:example-caption: 예시 -:figure-caption: 그림 -:important-caption: 중요 -:last-update-label: 마지막 업데이트 -//:listing-caption: 목록 -:manname-title: 이름 -:note-caption: 노트 -//:preface-title: 머리말 -:table-caption: 표 -:tip-caption: 힌트 -:toc-title: 차례 -:untitled-label: 익명 -:version-label: 버전 -:warning-caption: 경고 -endif::[] -// -// Dutch translation, courtesy of Roel Van Steenberghe -ifeval::["{lang}" == "nl"] -:appendix-caption: Bijlage -:caution-caption: Opgelet -:example-caption: Voorbeeld -:figure-caption: Figuur -:important-caption: Belangrijk -:last-update-label: Laatste aanpassing -//:listing-caption: Lijst -:manname-title: NAAM -:note-caption: Noot -//:preface-title: Inleiding -:table-caption: Tabel 
-:tip-caption: Tip -:toc-title: Ínhoudsopgave -:untitled-label: Naamloos -:version-label: Versie -:warning-caption: Waarschuwing -endif::[] -// -// Norwegian, courtesy of Aslak Knutsen -ifeval::["{lang}" == "no"] -:appendix-caption: Vedlegg -:caution-caption: Forsiktig -:example-caption: Eksempel -:figure-caption: Figur -:important-caption: Viktig -:last-update-label: Sist oppdatert -//:listing-caption: -:manname-title: NAVN -:note-caption: Notat -//:preface-title: -:table-caption: Tabell -:tip-caption: Tips -:toc-title: Innholdsfortegnelse -:untitled-label: Navnløs -:version-label: Versjon -:warning-caption: Advarsel -endif::[] -// -// Portuguese translation, courtesy of Roberto Cortez -ifeval::["{lang}" == "pt"] -:appendix-caption: Apêndice -:caution-caption: Atenção -:example-caption: Exemplo -:figure-caption: Figura -:important-caption: Importante -:last-update-label: Última actualização -//:listing-caption: Listagem -:manname-title: NOME -:note-caption: Nota -//:preface-title: Prefácio -:table-caption: Tabela -:tip-caption: Sugestão -:toc-title: Índice -:untitled-label: Sem título -:version-label: Versão -:warning-caption: Aviso -endif::[] -// -// Brazilian Portuguese translation, courtesy of Rafael Pestano -ifeval::["{lang}" == "pt_BR"] -:appendix-caption: Apêndice -:caution-caption: Cuidado -:example-caption: Exemplo -:figure-caption: Figura -:important-caption: Importante -:last-update-label: Última atualização -//:listing-caption: Listagem -:manname-title: NOME -:note-caption: Nota -//:preface-title: Prefácio -:table-caption: Tabela -:tip-caption: Dica -:toc-title: Índice -:untitled-label: Sem título -:version-label: Versão -:warning-caption: Aviso -endif::[] -// -// Russian translation, courtesy of Alexander Zobkov -ifeval::["{lang}" == "ru"] -:appendix-caption: Приложение -:caution-caption: Внимание -:example-caption: Пример -:figure-caption: Рисунок -:important-caption: Важно -:last-update-label: Последний раз обновлено -//:listing-caption: Листинг -:manname-title: НАЗВАНИЕ -:note-caption: Примечание -//:preface-title: Предисловие -:table-caption: Таблица -:tip-caption: Подсказка -:toc-title: Содержание -:untitled-label: Без названия -:version-label: Версия -:warning-caption: Предупреждение -endif::[] -// -// Serbian Cyrillic translation, courtesy of Bojan Stipic -ifeval::["{lang}" == "sr"] -:appendix-caption: Додатак -:caution-caption: Опрез -//:chapter-label: Поглавље -:example-caption: Пример -:figure-caption: Слика -:important-caption: Важно -:last-update-label: Последње ажурирано -//:listing-caption: Списак -:manname-title: НАЗИВ -:note-caption: Белешка -//:preface-title: Предговор -:table-caption: Табела -:tip-caption: Савет -:toc-title: Садржај -:untitled-label: Без назива -:version-label: Верзија -:warning-caption: Упозорење -endif::[] -// -// Serbian Latin translation, courtesy of Bojan Stipic -ifeval::["{lang}" == "sr_Latn"] -:appendix-caption: Dodatak -:caution-caption: Oprez -//:chapter-label: Poglavlje -:example-caption: Primer -:figure-caption: Slika -:important-caption: Važno -:last-update-label: Poslednje ažurirano -//:listing-caption: Spisak -:manname-title: NAZIV -:note-caption: Beleška -//:preface-title: Predgovor -:table-caption: Tabela -:tip-caption: Savet -:toc-title: Sadržaj -:untitled-label: Bez naziva -:version-label: Verzija -:warning-caption: Upozorenje -endif::[] -// -// Turkish translation, courtesy of Rahman Usta -ifeval::["{lang}" == "tr"] -:appendix-caption: Ek bölüm -:caution-caption: Dikkat -:example-caption: Örnek -:figure-caption: Görsel 
-:important-caption: Önemli -:last-update-label: Son güncelleme -//:listing-caption: Listeleme -:manname-title: İSİM -:note-caption: Not -//:preface-title: Ön söz -:table-caption: Tablo -:tip-caption: İpucu -:toc-title: İçindekiler -:untitled-label: İsimsiz -:version-label: Versiyon -:warning-caption: Uyarı -endif::[] -// -// Simplified Chinese translation, courtesy of John Dong -ifeval::["{lang}" == "zh_CN"] -:appendix-caption: 附录 -:caution-caption: 注意 -:example-caption: 示例 -:figure-caption: 图表 -:important-caption: 重要 -:last-update-label: 最后更新 -//:listing-caption: 列表 -:manname-title: 名称 -:note-caption: 笔记 -//:preface-title: 序言 -:table-caption: 表格 -:tip-caption: 提示 -:toc-title: 目录 -:untitled-label: 暂无标题 -:version-label: 版本 -:warning-caption: 警告 -endif::[] -// -// Traditional Chinese translation, courtesy of John Dong -ifeval::["{lang}" == "zh_TW"] -:appendix-caption: 附錄 -:caution-caption: 注意 -:example-caption: 示例 -:figure-caption: 圖表 -:important-caption: 重要 -:last-update-label: 最後更新 -//:listing-caption: 列表 -:manname-title: 名稱 -:note-caption: 筆記 -//:preface-title: 序言 -:table-caption: 表格 -:tip-caption: 提示 -:toc-title: 目錄 -:untitled-label: 暫無標題 -:version-label: 版本 -:warning-caption: 警告 -endif::[] +// NOTE: Please wrap the listing-caption and preface-title entries in a preprocessor conditional directive. +// These attributes should only be updated if set explicitly by the user. +ifdef::lang[include::attributes-{lang}.adoc[]] diff -Nru asciidoctor-1.5.5/data/locale/attributes-ar.adoc asciidoctor-2.0.10/data/locale/attributes-ar.adoc --- asciidoctor-1.5.5/data/locale/attributes-ar.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-ar.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Arabic translation, courtesy of Aboullaite Mohammed +:appendix-caption: ملحق +:appendix-refsig: {appendix-caption} +:caution-caption: تنبيه +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: مثال +:figure-caption: الشكل +:important-caption: مهم +:last-update-label: اخر تحديث +ifdef::listing-caption[:listing-caption: قائمة] +ifdef::manname-title[:manname-title: اسم] +:note-caption: ملاحظة +//:part-refsig: ??? +ifdef::preface-title[:preface-title: تمهيد] +//:section-refsig: ??? +:table-caption: جدول +:tip-caption: تلميح +:toc-title: فهرس +:untitled-label: بدون عنوان +:version-label: نسخة +:warning-caption: تحذير diff -Nru asciidoctor-1.5.5/data/locale/attributes-bg.adoc asciidoctor-2.0.10/data/locale/attributes-bg.adoc --- asciidoctor-1.5.5/data/locale/attributes-bg.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-bg.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Bulgarian translation, courtesy of Ivan St. Ivanov +:appendix-caption: Приложение +:appendix-refsig: {appendix-caption} +:caution-caption: Внимание +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Пример +:figure-caption: Фигура +:important-caption: Важно +:last-update-label: Последно обновен +ifdef::listing-caption[:listing-caption: Листинг] +ifdef::manname-title[:manname-title: Име] +:note-caption: Забележка +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Предговор] +//:section-refsig: ??? 
+:table-caption: Таблица +:tip-caption: Подсказка +:toc-title: Съдържание +:untitled-label: Без заглавие +:version-label: Версия +:warning-caption: Внимание diff -Nru asciidoctor-1.5.5/data/locale/attributes-ca.adoc asciidoctor-2.0.10/data/locale/attributes-ca.adoc --- asciidoctor-1.5.5/data/locale/attributes-ca.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-ca.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Catalan translation, courtesy of Abel Salgado Romero and Alex Soto +:appendix-caption: Apendix +:appendix-refsig: {appendix-caption} +:caution-caption: Atenció +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Exemple +:figure-caption: Figura +:important-caption: Important +:last-update-label: Última actualització +ifdef::listing-caption[:listing-caption: Llista] +ifdef::manname-title[:manname-title: Nom] +:note-caption: Nota +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Prefaci] +//:section-refsig: ??? +:table-caption: Taula +:tip-caption: Suggeriment +:toc-title: Índex +:untitled-label: Sense títol +:version-label: Versió +:warning-caption: Advertència diff -Nru asciidoctor-1.5.5/data/locale/attributes-cs.adoc asciidoctor-2.0.10/data/locale/attributes-cs.adoc --- asciidoctor-1.5.5/data/locale/attributes-cs.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-cs.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// czech translation, for reference only; matches the built-in behavior of core +:appendix-caption: Příloha +:appendix-refsig: {appendix-caption} +:caution-caption: Upozornění +:chapter-label: Kapitola +:chapter-refsig: {chapter-label} +:example-caption: Příklad +:figure-caption: Obrázek +:important-caption: Důležité +:last-update-label: Změněno +ifdef::listing-caption[:listing-caption: Seznam] +ifdef::manname-title[:manname-title: Název] +:note-caption: Poznámka +:part-refsig: Část +ifdef::preface-title[:preface-title: Úvod] +:section-refsig: Oddíl +:table-caption: Tabulka +:tip-caption: Tip +:toc-title: Obsah +:untitled-label: Nepojmenovaný +:version-label: Verze +:warning-caption: Varování diff -Nru asciidoctor-1.5.5/data/locale/attributes-da.adoc asciidoctor-2.0.10/data/locale/attributes-da.adoc --- asciidoctor-1.5.5/data/locale/attributes-da.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-da.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Danish translation, courtesy of Max Rydahl Andersen +:appendix-caption: Appendix +:appendix-refsig: {appendix-caption} +:caution-caption: Forsigtig +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Eksempel +:figure-caption: Figur +:important-caption: Vigtig +:last-update-label: Sidst opdateret +ifdef::listing-caption[:listing-caption: List] +ifdef::manname-title[:manname-title: Navn] +:note-caption: Notat +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Forord] +//:section-refsig: ??? 
+:table-caption: Tabel +:tip-caption: Tips +:toc-title: Indholdsfortegnelse +:untitled-label: Unavngivet +:version-label: Version +:warning-caption: Advarsel diff -Nru asciidoctor-1.5.5/data/locale/attributes-de.adoc asciidoctor-2.0.10/data/locale/attributes-de.adoc --- asciidoctor-1.5.5/data/locale/attributes-de.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-de.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,23 @@ +// German translation, courtesy of Florian Wilhelm +:appendix-caption: Anhang +:appendix-refsig: {appendix-caption} +:caution-caption: Achtung +:chapter-label: Kapitel +:chapter-refsig: {chapter-label} +:example-caption: Beispiel +:figure-caption: Abbildung +:important-caption: Wichtig +:last-update-label: Zuletzt aktualisiert +ifdef::listing-caption[:listing-caption: Listing] +ifdef::manname-title[:manname-title: Bezeichnung] +:note-caption: Anmerkung +:part-label: Teil +:part-refsig: {part-label} +ifdef::preface-title[:preface-title: Vorwort] +:section-refsig: Abschnitt +:table-caption: Tabelle +:tip-caption: Hinweis +:toc-title: Inhaltsverzeichnis +:untitled-label: Ohne Titel +:version-label: Version +:warning-caption: Warnung diff -Nru asciidoctor-1.5.5/data/locale/attributes-en.adoc asciidoctor-2.0.10/data/locale/attributes-en.adoc --- asciidoctor-1.5.5/data/locale/attributes-en.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-en.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,23 @@ +// English translation, for reference only; matches the built-in behavior of core +:appendix-caption: Appendix +:appendix-refsig: {appendix-caption} +:caution-caption: Caution +:chapter-label: Chapter +:chapter-refsig: {chapter-label} +:example-caption: Example +:figure-caption: Figure +:important-caption: Important +:last-update-label: Last updated +ifdef::listing-caption[:listing-caption: Listing] +ifdef::manname-title[:manname-title: Name] +:note-caption: Note +:part-label: Part +:part-refsig: {part-label} +ifdef::preface-title[:preface-title: Preface] +:section-refsig: Section +:table-caption: Table +:tip-caption: Tip +:toc-title: Table of Contents +:untitled-label: Untitled +:version-label: Version +:warning-caption: Warning diff -Nru asciidoctor-1.5.5/data/locale/attributes-es.adoc asciidoctor-2.0.10/data/locale/attributes-es.adoc --- asciidoctor-1.5.5/data/locale/attributes-es.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-es.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Spanish translation, courtesy of Eddú Meléndez +:appendix-caption: Apéndice +:appendix-refsig: {appendix-caption} +:caution-caption: Precaución +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Ejemplo +:figure-caption: Figura +:important-caption: Importante +:last-update-label: Ultima actualización +ifdef::listing-caption[:listing-caption: Lista] +ifdef::manname-title[:manname-title: Nombre] +:note-caption: Nota +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Prefacio] +//:section-refsig: ??? 
+:table-caption: Tabla +:tip-caption: Sugerencia +:toc-title: Tabla de Contenido +:untitled-label: Sin título +:version-label: Versión +:warning-caption: Aviso diff -Nru asciidoctor-1.5.5/data/locale/attributes-fa.adoc asciidoctor-2.0.10/data/locale/attributes-fa.adoc --- asciidoctor-1.5.5/data/locale/attributes-fa.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-fa.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Persian (Farsi) translation, courtesy of Shahryar Eivazzadeh +:appendix-caption: پیوست +:appendix-refsig: {appendix-caption} +:caution-caption: گوشزد +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: نمونه +:figure-caption: نمودار +:important-caption: مهم +:last-update-label: آخرین به روز رسانی +ifdef::listing-caption[:listing-caption: فهرست] +ifdef::manname-title[:manname-title: نام] +:note-caption: یادداشت +//:part-refsig: ??? +ifdef::preface-title[:preface-title: پیشگفتار] +//:section-refsig: ??? +:table-caption: جدول +:tip-caption: نکته +:toc-title: فهرست مطالب +:untitled-label: بی‌نام +:version-label: نگارش +:warning-caption: هشدار diff -Nru asciidoctor-1.5.5/data/locale/attributes-fi.adoc asciidoctor-2.0.10/data/locale/attributes-fi.adoc --- asciidoctor-1.5.5/data/locale/attributes-fi.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-fi.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Finnish translation by Tero Hänninen +:appendix-caption: Liitteet +:appendix-refsig: {appendix-caption} +:caution-caption: Huom +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Esimerkki +:figure-caption: Kuvio +:important-caption: Tärkeää +:last-update-label: Viimeksi päivitetty +ifdef::listing-caption[:listing-caption: Listaus] +ifdef::manname-title[:manname-title: Nimi] +:note-caption: Huomio +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Esipuhe] +//:section-refsig: ??? +:table-caption: Taulukko +:tip-caption: Vinkki +:toc-title: Sisällysluettelo +:untitled-label: Nimetön +:version-label: Versio +:warning-caption: Varoitus diff -Nru asciidoctor-1.5.5/data/locale/attributes-fr.adoc asciidoctor-2.0.10/data/locale/attributes-fr.adoc --- asciidoctor-1.5.5/data/locale/attributes-fr.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-fr.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// French translation, courtesy of Nicolas Comet +:appendix-caption: Annexe +:appendix-refsig: {appendix-caption} +:caution-caption: Avertissement +//:chapter-label: Chapitre +//:chapter-refsig: {chapter-label} +:example-caption: Exemple +:figure-caption: Figure +:important-caption: Important +:last-update-label: Dernière mise à jour +ifdef::listing-caption[:listing-caption: Liste] +ifdef::manname-title[:manname-title: Nom] +:note-caption: Note +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Préface] +//:section-refsig: ??? 
+:table-caption: Tableau +:tip-caption: Astuce +:toc-title: Table des matières +:untitled-label: Sans titre +:version-label: Version +:warning-caption: Attention diff -Nru asciidoctor-1.5.5/data/locale/attributes-hu.adoc asciidoctor-2.0.10/data/locale/attributes-hu.adoc --- asciidoctor-1.5.5/data/locale/attributes-hu.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-hu.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Hungarian translation, courtesy of István Pató +:appendix-caption: függelék +:appendix-refsig: {appendix-caption} +:caution-caption: Figyelmeztetés +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Példa +:figure-caption: Ábra +:important-caption: Fontos +:last-update-label: Utolsó frissítés +ifdef::listing-caption[:listing-caption: Lista] +ifdef::manname-title[:manname-title: Név] +:note-caption: Megjegyzés +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Előszó] +//:section-refsig: ??? +:table-caption: Táblázat +:tip-caption: Tipp +:toc-title: Tartalomjegyzék +:untitled-label: Névtelen +:version-label: Verzió +:warning-caption: Figyelem diff -Nru asciidoctor-1.5.5/data/locale/attributes-id.adoc asciidoctor-2.0.10/data/locale/attributes-id.adoc --- asciidoctor-1.5.5/data/locale/attributes-id.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-id.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Bahasa Indonesia, courtesy of Triyan W. Nugroho +:appendix-caption: Lampiran +:appendix-refsig: {appendix-caption} +:caution-caption: Perhatian +:chapter-label: Bab +:chapter-refsig: {chapter-label} +:example-caption: Contoh +:figure-caption: Gambar +:important-caption: Penting +:last-update-label: Pembaruan terakhir +ifdef::listing-caption[:listing-caption: Daftar] +ifdef::manname-title[:manname-title: Nama] +:note-caption: Catatan +//:part-refsig: ??? +//ifdef::preface-title[:preface-title: ???] +//:section-refsig: ??? +:table-caption: Tabel +:tip-caption: Tips +:toc-title: Daftar Isi +:untitled-label: Tak Berjudul +:version-label: Versi +:warning-caption: Peringatan diff -Nru asciidoctor-1.5.5/data/locale/attributes-it.adoc asciidoctor-2.0.10/data/locale/attributes-it.adoc --- asciidoctor-1.5.5/data/locale/attributes-it.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-it.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Italian translation, courtesy of Marco Ciampa +:appendix-caption: Appendice +:appendix-refsig: {appendix-caption} +:caution-caption: Attenzione +:chapter-label: Capitolo +:chapter-refsig: {chapter-label} +:example-caption: Esempio +:figure-caption: Figura +:important-caption: Importante +:last-update-label: Ultimo aggiornamento +ifdef::listing-caption[:listing-caption: Elenco] +ifdef::manname-title[:manname-title: Nome] +:note-caption: Nota +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Prefazione] +//:section-refsig: ??? 
+:table-caption: Tabella +:tip-caption: Suggerimento +:toc-title: Indice +:untitled-label: Senza titolo +:version-label: Versione +:warning-caption: Attenzione diff -Nru asciidoctor-1.5.5/data/locale/attributes-ja.adoc asciidoctor-2.0.10/data/locale/attributes-ja.adoc --- asciidoctor-1.5.5/data/locale/attributes-ja.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-ja.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Japanese translation, courtesy of Takayuki Konishi +:appendix-caption: 付録 +:appendix-refsig: {appendix-caption} +:caution-caption: 注意 +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: 例 +:figure-caption: 図 +:important-caption: 重要 +:last-update-label: 最終更新 +ifdef::listing-caption[:listing-caption: リスト] +ifdef::manname-title[:manname-title: 名前] +:note-caption: 注記 +//:part-refsig: ??? +ifdef::preface-title[:preface-title: まえがき] +//:section-refsig: ??? +:table-caption: 表 +:tip-caption: ヒント +:toc-title: 目次 +:untitled-label: 無題 +:version-label: バージョン +:warning-caption: 警告 diff -Nru asciidoctor-1.5.5/data/locale/attributes-kr.adoc asciidoctor-2.0.10/data/locale/attributes-kr.adoc --- asciidoctor-1.5.5/data/locale/attributes-kr.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-kr.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Korean translation, courtesy of Sungsik Nam +:appendix-caption: 부록 +:appendix-refsig: {appendix-caption} +:caution-caption: 주의 +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: 예시 +:figure-caption: 그림 +:important-caption: 중요 +:last-update-label: 마지막 업데이트 +ifdef::listing-caption[:listing-caption: 목록] +ifdef::manname-title[:manname-title: 이름] +:note-caption: 노트 +//:part-refsig: ??? +ifdef::preface-title[:preface-title: 머리말] +//:section-refsig: ??? +:table-caption: 표 +:tip-caption: 힌트 +:toc-title: 차례 +:untitled-label: 익명 +:version-label: 버전 +:warning-caption: 경고 diff -Nru asciidoctor-1.5.5/data/locale/attributes-nb.adoc asciidoctor-2.0.10/data/locale/attributes-nb.adoc --- asciidoctor-1.5.5/data/locale/attributes-nb.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-nb.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Norwegian Bokmål, courtesy of Aslak Knutsen , with updates from Karl Ove Hufthammer +:appendix-caption: Vedlegg +:appendix-refsig: {appendix-caption} +:caution-caption: OBS +:chapter-label: Kapittel +:chapter-refsig: {chapter-label} +:example-caption: Eksempel +:figure-caption: Figur +:important-caption: Viktig +:last-update-label: Sist oppdatert +ifdef::listing-caption[:listing-caption: Programkode] +ifdef::manname-title[:manname-title: Navn] +:note-caption: Merk +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Forord] +//:section-refsig: ??? +:table-caption: Tabell +:tip-caption: Tips +:toc-title: Innhold +:untitled-label: Navnløs +:version-label: Versjon +:warning-caption: Advarsel diff -Nru asciidoctor-1.5.5/data/locale/attributes-nl.adoc asciidoctor-2.0.10/data/locale/attributes-nl.adoc --- asciidoctor-1.5.5/data/locale/attributes-nl.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-nl.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Dutch translation, courtesy of Roel Van Steenberghe +:appendix-caption: Bijlage +:appendix-refsig: {appendix-caption} +:caution-caption: Opgelet +//:chapter-label: ??? 
+//:chapter-refsig: {chapter-label} +:example-caption: Voorbeeld +:figure-caption: Figuur +:important-caption: Belangrijk +:last-update-label: Laatste aanpassing +ifdef::listing-caption[:listing-caption: Lijst] +ifdef::manname-title[:manname-title: Naam] +:note-caption: Noot +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Inleiding] +//:section-refsig: ??? +:table-caption: Tabel +:tip-caption: Tip +:toc-title: Ínhoudsopgave +:untitled-label: Naamloos +:version-label: Versie +:warning-caption: Waarschuwing diff -Nru asciidoctor-1.5.5/data/locale/attributes-nn.adoc asciidoctor-2.0.10/data/locale/attributes-nn.adoc --- asciidoctor-1.5.5/data/locale/attributes-nn.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-nn.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Norwegian Nynorsk, courtesy of Karl Ove Hufthammer +:appendix-caption: Vedlegg +:appendix-refsig: {appendix-caption} +:caution-caption: OBS +:chapter-label: Kapittel +:chapter-refsig: {chapter-label} +:example-caption: Eksempel +:figure-caption: Figur +:important-caption: Viktig +:last-update-label: Sist oppdatert +ifdef::listing-caption[:listing-caption: Programkode] +ifdef::manname-title[:manname-title: Namn] +:note-caption: Merk +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Forord] +//:section-refsig: ??? +:table-caption: Tabell +:tip-caption: Tips +:toc-title: Innhald +:untitled-label: Namnlaus +:version-label: Versjon +:warning-caption: Åtvaring diff -Nru asciidoctor-1.5.5/data/locale/attributes-pl.adoc asciidoctor-2.0.10/data/locale/attributes-pl.adoc --- asciidoctor-1.5.5/data/locale/attributes-pl.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-pl.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Polish translation, courtesy of Łukasz Dziedziul +:appendix-caption: Dodatek +:appendix-refsig: {appendix-caption} +:caution-caption: Uwaga +:chapter-label: Rozdział +:chapter-refsig: {chapter-label} +:example-caption: Przykład +:figure-caption: Rysunek +:important-caption: Ważne +:last-update-label: Ostatnio zmodyfikowany +//ifdef::listing-caption[:listing-caption: ???] +ifdef::manname-title[:manname-title: Nazwa] +:note-caption: Notka +//:part-refsig: ??? +//ifdef::preface-title[:preface-title: ???] +//:section-refsig: ??? +:table-caption: Tabela +:tip-caption: Sugestia +:toc-title: Spis treści +:untitled-label: Bez tytułu +:version-label: Wersja +:warning-caption: Ostrzeżenie diff -Nru asciidoctor-1.5.5/data/locale/attributes-pt.adoc asciidoctor-2.0.10/data/locale/attributes-pt.adoc --- asciidoctor-1.5.5/data/locale/attributes-pt.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-pt.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Portuguese translation, courtesy of Roberto Cortez +:appendix-caption: Apêndice +:appendix-refsig: {appendix-caption} +:caution-caption: Atenção +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Exemplo +:figure-caption: Figura +:important-caption: Importante +:last-update-label: Última actualização +ifdef::listing-caption[:listing-caption: Listagem] +ifdef::manname-title[:manname-title: Nome] +:note-caption: Nota +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Prefácio] +//:section-refsig: ??? 
+:table-caption: Tabela +:tip-caption: Sugestão +:toc-title: Índice +:untitled-label: Sem título +:version-label: Versão +:warning-caption: Aviso diff -Nru asciidoctor-1.5.5/data/locale/attributes-pt_BR.adoc asciidoctor-2.0.10/data/locale/attributes-pt_BR.adoc --- asciidoctor-1.5.5/data/locale/attributes-pt_BR.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-pt_BR.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Brazilian Portuguese translation, courtesy of Rafael Pestano +:appendix-caption: Apêndice +:appendix-refsig: {appendix-caption} +:caution-caption: Cuidado +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Exemplo +:figure-caption: Figura +:important-caption: Importante +:last-update-label: Última atualização +ifdef::listing-caption[:listing-caption: Listagem] +ifdef::manname-title[:manname-title: Nome] +:note-caption: Nota +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Prefácio] +//:section-refsig: ??? +:table-caption: Tabela +:tip-caption: Dica +:toc-title: Índice +:untitled-label: Sem título +:version-label: Versão +:warning-caption: Aviso diff -Nru asciidoctor-1.5.5/data/locale/attributes-ro.adoc asciidoctor-2.0.10/data/locale/attributes-ro.adoc --- asciidoctor-1.5.5/data/locale/attributes-ro.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-ro.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Romanian translation, courtesy of Vitalie Lazu +:appendix-caption: Apendix +:appendix-refsig: {appendix-caption} +:caution-caption: Precauție +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Exemplu +:figure-caption: Figură +:important-caption: Important +:last-update-label: Ultima actualizare +ifdef::listing-caption[:listing-caption: Listare] +ifdef::manname-title[:manname-title: Nume] +:note-caption: Notă +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Prefață] +//:section-refsig: ??? +:table-caption: Tabela +:tip-caption: Sfat +:toc-title: Cuprins +:untitled-label: Fără denumire +:version-label: Versiunea +:warning-caption: Atenție diff -Nru asciidoctor-1.5.5/data/locale/attributes-ru.adoc asciidoctor-2.0.10/data/locale/attributes-ru.adoc --- asciidoctor-1.5.5/data/locale/attributes-ru.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-ru.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Russian translation, courtesy of Alexander Zobkov +:appendix-caption: Приложение +:appendix-refsig: {appendix-caption} +:caution-caption: Внимание +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Пример +:figure-caption: Рисунок +:important-caption: Важно +:last-update-label: Последний раз обновлено +ifdef::listing-caption[:listing-caption: Листинг] +ifdef::manname-title[:manname-title: Название] +:note-caption: Примечание +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Предисловие] +//:section-refsig: ??? 
+:table-caption: Таблица +:tip-caption: Подсказка +:toc-title: Содержание +:untitled-label: Без названия +:version-label: Версия +:warning-caption: Предупреждение diff -Nru asciidoctor-1.5.5/data/locale/attributes-sr.adoc asciidoctor-2.0.10/data/locale/attributes-sr.adoc --- asciidoctor-1.5.5/data/locale/attributes-sr.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-sr.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,23 @@ +// Serbian Cyrillic translation, courtesy of Bojan Stipic +:appendix-caption: Додатак +:appendix-refsig: {appendix-caption} +:caution-caption: Опрез +:chapter-label: Поглавље +:chapter-refsig: {chapter-label} +:example-caption: Пример +:figure-caption: Слика +:important-caption: Важно +:last-update-label: Последње ажурирано +ifdef::listing-caption[:listing-caption: Листинг] +ifdef::manname-title[:manname-title: Назив] +:note-caption: Белешка +:part-label: Део +:part-refsig: {part-label} +ifdef::preface-title[:preface-title: Предговор] +:section-refsig: Секција +:table-caption: Табела +:tip-caption: Савет +:toc-title: Садржај +:untitled-label: Без назива +:version-label: Верзија +:warning-caption: Упозорење diff -Nru asciidoctor-1.5.5/data/locale/attributes-sr_Latn.adoc asciidoctor-2.0.10/data/locale/attributes-sr_Latn.adoc --- asciidoctor-1.5.5/data/locale/attributes-sr_Latn.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-sr_Latn.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,23 @@ +// Serbian Latin translation, courtesy of Bojan Stipic +:appendix-caption: Dodatak +:appendix-refsig: {appendix-caption} +:caution-caption: Oprez +:chapter-label: Poglavlje +:chapter-refsig: {chapter-label} +:example-caption: Primer +:figure-caption: Slika +:important-caption: Važno +:last-update-label: Poslednje ažurirano +ifdef::listing-caption[:listing-caption: Listing] +ifdef::manname-title[:manname-title: Naziv] +:note-caption: Beleška +:part-label: Deo +:part-refsig: {part-label} +ifdef::preface-title[:preface-title: Predgovor] +:section-refsig: Sekcija +:table-caption: Tabela +:tip-caption: Savet +:toc-title: Sadržaj +:untitled-label: Bez naziva +:version-label: Verzija +:warning-caption: Upozorenje diff -Nru asciidoctor-1.5.5/data/locale/attributes-sv.adoc asciidoctor-2.0.10/data/locale/attributes-sv.adoc --- asciidoctor-1.5.5/data/locale/attributes-sv.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-sv.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,23 @@ +// Swedish translation, Jonas Björk +:appendix-caption: Bilaga +:appendix-refsig: {appendix-caption} +:caution-caption: Var uppmärksam +:chapter-label: Kapitel +:chapter-refsig: {chapter-label} +:example-caption: Exempel +:figure-caption: Figur +:important-caption: Viktigt +:last-update-label: Senast uppdaterad +ifdef::listing-caption[:listing-caption: Lista] +ifdef::manname-title[:manname-title: Namn] +:note-caption: Notera +:part-label: Del +:part-refsig: {part-label} +ifdef::preface-title[:preface-title: Förord] +:section-refsig: Avsnitt +:table-caption: Tabell +:tip-caption: Tips +:toc-title: Innehållsförteckning +:untitled-label: Odöpt +:version-label: Version +:warning-caption: Varning diff -Nru asciidoctor-1.5.5/data/locale/attributes-tr.adoc asciidoctor-2.0.10/data/locale/attributes-tr.adoc --- asciidoctor-1.5.5/data/locale/attributes-tr.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-tr.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Turkish 
translation, courtesy of Rahman Usta +:appendix-caption: Ek bölüm +:appendix-refsig: {appendix-caption} +:caution-caption: Dikkat +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Örnek +:figure-caption: Görsel +:important-caption: Önemli +:last-update-label: Son güncelleme +ifdef::listing-caption[:listing-caption: Listeleme] +ifdef::manname-title[:manname-title: İsim] +:note-caption: Not +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Ön söz] +//:section-refsig: ??? +:table-caption: Tablo +:tip-caption: İpucu +:toc-title: İçindekiler +:untitled-label: İsimsiz +:version-label: Versiyon +:warning-caption: Uyarı diff -Nru asciidoctor-1.5.5/data/locale/attributes-uk.adoc asciidoctor-2.0.10/data/locale/attributes-uk.adoc --- asciidoctor-1.5.5/data/locale/attributes-uk.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-uk.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Ukrainian translation, courtesy of Kyrylo Yatsenko +:appendix-caption: Додаток +:appendix-refsig: {appendix-caption} +:caution-caption: Обережно +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: Приклад +:figure-caption: Зображення +:important-caption: Важливо +:last-update-label: Востаннє оновлено +ifdef::listing-caption[:listing-caption: Лістинг] +ifdef::manname-title[:manname-title: Назва] +:note-caption: Зауваження +//:part-refsig: ??? +ifdef::preface-title[:preface-title: Передмова] +//:section-refsig: ??? +:table-caption: Таблиця +:tip-caption: Підказка +:toc-title: Зміст +:untitled-label: Без назви +:version-label: Версія +:warning-caption: Попередження diff -Nru asciidoctor-1.5.5/data/locale/attributes-zh_CN.adoc asciidoctor-2.0.10/data/locale/attributes-zh_CN.adoc --- asciidoctor-1.5.5/data/locale/attributes-zh_CN.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-zh_CN.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Simplified Chinese translation, courtesy of John Dong +:appendix-caption: 附录 +:appendix-refsig: {appendix-caption} +:caution-caption: 注意 +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: 示例 +:figure-caption: 图表 +:important-caption: 重要 +:last-update-label: 最后更新 +ifdef::listing-caption[:listing-caption: 列表] +ifdef::manname-title[:manname-title: 名称] +:note-caption: 笔记 +//:part-refsig: ??? +ifdef::preface-title[:preface-title: 序言] +//:section-refsig: ??? +:table-caption: 表格 +:tip-caption: 提示 +:toc-title: 目录 +:untitled-label: 暂无标题 +:version-label: 版本 +:warning-caption: 警告 diff -Nru asciidoctor-1.5.5/data/locale/attributes-zh_TW.adoc asciidoctor-2.0.10/data/locale/attributes-zh_TW.adoc --- asciidoctor-1.5.5/data/locale/attributes-zh_TW.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/locale/attributes-zh_TW.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +// Traditional Chinese translation, courtesy of John Dong +:appendix-caption: 附錄 +:appendix-refsig: {appendix-caption} +:caution-caption: 注意 +//:chapter-label: ??? +//:chapter-refsig: {chapter-label} +:example-caption: 示例 +:figure-caption: 圖表 +:important-caption: 重要 +:last-update-label: 最後更新 +ifdef::listing-caption[:listing-caption: 列表] +ifdef::manname-title[:manname-title: 名稱] +:note-caption: 筆記 +//:part-refsig: ??? +ifdef::preface-title[:preface-title: 序言] +//:section-refsig: ??? 
+:table-caption: 表格 +:tip-caption: 提示 +:toc-title: 目錄 +:untitled-label: 暫無標題 +:version-label: 版本 +:warning-caption: 警告 diff -Nru asciidoctor-1.5.5/data/reference/syntax.adoc asciidoctor-2.0.10/data/reference/syntax.adoc --- asciidoctor-1.5.5/data/reference/syntax.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/data/reference/syntax.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,289 @@ += AsciiDoc Syntax +:icons: font +:stem: +:toc: left +:url-docs: https://asciidoctor.org/docs +:url-gem: https://rubygems.org/gems/asciidoctor + +A brief reference of the most commonly used AsciiDoc syntax. +You can find the full documentation for the AsciiDoc syntax at {url-docs}. + +== Paragraphs + +A normal paragraph. +Line breaks are not preserved. +// line comments, which are lines that start with //, are skipped + +A blank line separates paragraphs. + +[%hardbreaks] +This paragraph carries the `hardbreaks` option. +Notice how line breaks are now preserved. + + An indented (literal) paragraph disables text formatting, + preserves spaces and line breaks, and is displayed in a + monospaced font. + +[sidebar#id.role] +A style, ID, and/or role gives a paragraph (or block) special meaning, like this sidebar. + +NOTE: An admonition paragraph, like this note, grabs the reader's attention. + +TIP: Convert this document using the `asciidoctor` command to see the output produced from it. + +== Text Formatting +:hardbreaks: + +.Constrained (applied at word boundaries) +*strong importance* (aka bold) +_stress emphasis_ (aka italic) +`monospaced` (aka typewriter text) +"`double`" and '`single`' typographic quotes ++passthrough text+ (substitutions disabled) +`+literal text+` (monospaced with substitutions disabled) + +.Unconstrained (applied anywhere) +**C**reate+**R**ead+**U**pdate+**D**elete +fan__freakin__tastic +``mono``culture + +.Replacements +A long time ago in a galaxy far, far away... +(C) 1976 Arty Artisan +I believe I shall--no, actually I won't. + +.Macros +// where c=specialchars, q=quotes, a=attributes, r=replacements, m=macros, p=post_replacements, etc. +The European icon:flag[role=blue] is blue & contains pass:[************] arranged in a icon:circle-o[role=yellow]. +The pass:c[->] operator is often referred to as the stabby lambda. +Since `pass:[++]` has strong priority in AsciiDoc, you can rewrite pass:c,a,r[C++ => C{pp}]. +// activate stem support by adding `:stem:` to the document header +stem:[sqrt(4) = 2] + +:!hardbreaks: +== Attributes + + // define attributes in the document header; must be flush with left margin + :name: value + +You can download and install Asciidoctor {asciidoctor-version} from {url-gem}. +C{pp} is not required, only Ruby. +Use a leading backslash to output a word enclosed in curly braces, like \{name}. + +== Links + +[%hardbreaks] +https://example.org/page[A webpage] +link:../path/to/file.txt[A local file] +xref:document.adoc[A sibling document] +mailto:hello@example.org[Email to say hello!] + +== Anchors + +[[idname,reference text]] +// or written using normal block attributes as `[#idname,reftext=reference text]` +A paragraph (or any block) with an anchor (aka ID) and reftext. + +See <> or <>. + +xref:document.adoc#idname[Jumps to anchor in another document]. + +This paragraph has a footnote.footnote:[This is the text of the footnote.] + +== Lists + +=== Unordered + +* level 1 +** level 2 +*** level 3 +**** level 4 +***** etc. 
+* back at level 1 ++ +Attach a block or paragraph to a list item using a list continuation (which you can enclose in an open block). + +.Some Authors +[circle] +- Edgar Allen Poe +- Sheri S. Tepper +- Bill Bryson + +=== Ordered + +. Step 1 +. Step 2 +.. Step 2a +.. Step 2b +. Step 3 + +.Remember your Roman numerals? +[upperroman] +. is one +. is two +. is three + +=== Checklist + +* [x] checked +* [ ] not checked + +=== Callout + +// enable callout bubbles by adding `:icons: font` to the document header +[,ruby] +---- +puts 'Hello, World!' # <1> +---- +<1> Prints `Hello, World!` to the console. + +=== Description + +first term:: description of first term +second term:: +description of second term + +== Document Structure + +=== Header + + // header must be flush with left margin + = Document Title + Author Name + v1.0, 2019-01-01 + +=== Sections + + // must be flush with left margin + = Document Title (Level 0) + == Level 1 + === Level 2 + ==== Level 3 + ===== Level 4 + ====== Level 5 + == Back at Level 1 + +=== Includes + + // must be flush with left margin + include::basics.adoc[] + + // define -a allow-uri-read to allow content to be read from URI + include::https://example.org/installation.adoc[] + +== Blocks + +-- +open - a general-purpose content wrapper; useful for enclosing content to attach to a list item +-- + +// recognized types include CAUTION, IMPORTANT, NOTE, TIP, and WARNING +// enable admonition icons by setting `:icons: font` in the document header +[NOTE] +==== +admonition - a notice for the reader, ranging in severity from a tip to an alert +==== + +==== +example - a demonstration of the concept being documented +==== + +.Toggle Me +[%collapsible] +==== +collapsible - these details are revealed by clicking the title +==== + +**** +sidebar - auxiliary content that can be read independently of the main content +**** + +.... +literal - an exhibit that features program output +.... + +---- +listing - an exhibit that features program input, source code, or the contents of a file +---- + +[,language] +---- +source - a listing that is embellished with (colorized) syntax highlighting +---- + +```language +fenced code - a shorthand syntax for the source block +``` + +[,attribution,citetitle] +____ +quote - a quotation or excerpt; attribution with title of source are optional +____ + +[verse,attribution,citetitle] +____ +verse - a literary excerpt, often a poem; attribution with title of source are optional +____ + +++++ +pass - content passed directly to the output document; often raw HTML +++++ + +// activate stem support by adding `:stem:` to the document header +[stem] +++++ +x = y^2 +++++ + +//// +comment - content which is not included in the output document +//// + +== Tables + +.Table Attributes +[cols=>1h;2d,width=50%,frame=topbot] +|=== +| Attribute Name | Values + +| options +| header,footer,autowidth + +| cols +| colspec[;colspec;...] + +| grid +| all \| cols \| rows \| none + +| frame +| all \| sides \| topbot \| none + +| stripes +| all \| even \| odd \| none + +| width +| (0%..100%) + +| format +| psv {vbar} csv {vbar} dsv +|=== + +== Multimedia + +image::screenshot.png[block image,800,450] + +Press image:reload.svg[reload,16,opts=interactive] to reload the page. 
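The syntax reference added above (data/reference/syntax.adoc) is an ordinary AsciiDoc file, so it can be rendered with Asciidoctor's Ruby API. A minimal sketch, assuming the gem is installed and the file is reachable from the working directory (the output file name is illustrative, not part of the shipped data):

[,ruby]
----
require 'asciidoctor'

# Convert the AsciiDoc syntax reference to a standalone HTML 5 file.
# :safe is the safe mode level typically used for local, trusted documents.
Asciidoctor.convert_file 'data/reference/syntax.adoc',
  safe: :safe,
  to_file: 'syntax.html'
----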
+ +video::movie.mp4[width=640,start=60,end=140,options=autoplay] + +video::aHjpOzsQ9YI[youtube] + +video::300817511[vimeo] + +== Breaks + +// thematic break (aka horizontal rule) +--- + +// page break +<<< diff -Nru asciidoctor-1.5.5/data/stylesheets/asciidoctor-default.css asciidoctor-2.0.10/data/stylesheets/asciidoctor-default.css --- asciidoctor-1.5.5/data/stylesheets/asciidoctor-default.css 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/data/stylesheets/asciidoctor-default.css 2019-08-18 16:11:54.000000000 +0000 @@ -1,13 +1,11 @@ -/* Asciidoctor default stylesheet | MIT License | http://asciidoctor.org */ -/* Remove comment around @import statement below when using as a custom stylesheet */ +/* Asciidoctor default stylesheet | MIT License | https://asciidoctor.org */ +/* Uncomment @import statement to use as custom stylesheet */ /*@import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400,700";*/ -article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block} -audio,canvas,video{display:inline-block} +article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section{display:block} +audio,video{display:inline-block} audio:not([controls]){display:none;height:0} -[hidden],template{display:none} -script{display:none!important} html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%} -a{background:transparent} +a{background:none} a:focus{outline:thin dotted} a:active,a:hover{outline:0} h1{font-size:2em;margin:.67em 0} @@ -34,12 +32,10 @@ button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer} button[disabled],html input[disabled]{cursor:default} input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0} -input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box} -input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none} button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0} textarea{overflow:auto;vertical-align:top} table{border-collapse:collapse;border-spacing:0} -*,*:before,*:after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box} +*,*::before,*::after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box} html,body{font-size:100%} body{background:#fff;color:rgba(0,0,0,.8);padding:0;margin:0;font-family:"Noto Serif","DejaVu Serif",serif;font-weight:400;font-style:normal;line-height:1;position:relative;cursor:auto;tab-size:4;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased} a:hover{cursor:pointer} @@ -57,13 +53,12 @@ textarea{height:auto;min-height:50px} select{width:100%} .center{margin-left:auto;margin-right:auto} -.spread{width:100%} -p.lead,.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{font-size:1.21875em;line-height:1.6} +.stretch{width:100%} .subheader,.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{line-height:1.45;color:#7a2518;font-weight:400;margin-top:0;margin-bottom:.25em} 
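The default stylesheet being updated here is what the HTML converter embeds when no other stylesheet is specified; a different stylesheet can be selected through document attributes. A brief sketch under assumed names (doc.adoc and css/custom.css are illustrative, not files in this package):

[,ruby]
----
require 'asciidoctor'

# Point the HTML converter at a custom stylesheet instead of the
# bundled asciidoctor-default.css.
Asciidoctor.convert_file 'doc.adoc',
  safe: :safe,
  attributes: { 'stylesdir' => 'css', 'stylesheet' => 'custom.css' }
----

The same selection can be made on the command line with `-a stylesdir=css -a stylesheet=custom.css`.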
div,dl,dt,dd,ul,ol,li,h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6,pre,form,p,blockquote,th,td{margin:0;padding:0;direction:ltr} a{color:#2156a5;text-decoration:underline;line-height:inherit} a:hover,a:focus{color:#1d4b8f} -a img{border:none} +a img{border:0} p{font-family:inherit;font-weight:400;font-size:1em;line-height:1.6;margin-bottom:1.25em;text-rendering:optimizeLegibility} p aside{font-size:.875em;line-height:1.35;font-style:italic} h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{font-family:"Open Sans","DejaVu Sans",sans-serif;font-weight:300;font-style:normal;color:#ba3925;text-rendering:optimizeLegibility;margin-top:1em;margin-bottom:.5em;line-height:1.0125em} @@ -73,19 +68,18 @@ h3,#toctitle,.sidebarblock>.content>.title{font-size:1.375em} h4,h5{font-size:1.125em} h6{font-size:1em} -hr{border:solid #ddddd8;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em;height:0} +hr{border:solid #dddddf;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em;height:0} em,i{font-style:italic;line-height:inherit} strong,b{font-weight:bold;line-height:inherit} small{font-size:60%;line-height:inherit} code{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;color:rgba(0,0,0,.9)} ul,ol,dl{font-size:1em;line-height:1.6;margin-bottom:1.25em;list-style-position:outside;font-family:inherit} -ul,ol,ul.no-bullet,ol.no-bullet{margin-left:1.5em} +ul,ol{margin-left:1.5em} ul li ul,ul li ol{margin-left:1.25em;margin-bottom:0;font-size:1em} ul.square li ul,ul.circle li ul,ul.disc li ul{list-style:inherit} ul.square{list-style-type:square} ul.circle{list-style-type:circle} ul.disc{list-style-type:disc} -ul.no-bullet{list-style:none} ol li ul,ol li ol{margin-left:1.25em;margin-bottom:0} dl dt{margin-bottom:.3125em;font-weight:bold} dl dd{margin-bottom:1.25em} @@ -93,58 +87,65 @@ abbr{text-transform:none} blockquote{margin:0 0 1.25em;padding:.5625em 1.25em 0 1.1875em;border-left:1px solid #ddd} blockquote cite{display:block;font-size:.9375em;color:rgba(0,0,0,.6)} -blockquote cite:before{content:"\2014 \0020"} +blockquote cite::before{content:"\2014 \0020"} blockquote cite a,blockquote cite a:visited{color:rgba(0,0,0,.6)} blockquote,blockquote p{line-height:1.6;color:rgba(0,0,0,.85)} -@media only screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2} +@media screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2} h1{font-size:2.75em} h2{font-size:2.3125em} h3,#toctitle,.sidebarblock>.content>.title{font-size:1.6875em} h4{font-size:1.4375em}} table{background:#fff;margin-bottom:1.25em;border:solid 1px #dedede} -table thead,table tfoot{background:#f7f8f7;font-weight:bold} +table thead,table tfoot{background:#f7f8f7} table thead tr th,table thead tr td,table tfoot tr th,table tfoot tr td{padding:.5em .625em .625em;font-size:inherit;color:rgba(0,0,0,.8);text-align:left} table tr th,table tr td{padding:.5625em .625em;font-size:inherit;color:rgba(0,0,0,.8)} -table tr.even,table tr.alt,table tr:nth-of-type(even){background:#f8f8f7} +table tr.even,table tr.alt{background:#f8f8f7} table thead tr th,table tfoot tr th,table tbody tr td,table tr td,table tfoot tr td{display:table-cell;line-height:1.6} h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2;word-spacing:-.05em} h1 strong,h2 strong,h3 strong,#toctitle strong,.sidebarblock>.content>.title strong,h4 strong,h5 strong,h6 strong{font-weight:400} 
-.clearfix:before,.clearfix:after,.float-group:before,.float-group:after{content:" ";display:table} -.clearfix:after,.float-group:after{clear:both} -*:not(pre)>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background-color:#f7f7f8;-webkit-border-radius:4px;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed;word-wrap:break-word} -*:not(pre)>code.nobreak{word-wrap:normal} -*:not(pre)>code.nowrap{white-space:nowrap} -pre,pre>code{line-height:1.45;color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;text-rendering:optimizeSpeed} +.clearfix::before,.clearfix::after,.float-group::before,.float-group::after{content:" ";display:table} +.clearfix::after,.float-group::after{clear:both} +:not(pre):not([class^=L])>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background:#f7f7f8;-webkit-border-radius:4px;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed;word-wrap:break-word} +:not(pre)>code.nobreak{word-wrap:normal} +:not(pre)>code.nowrap{white-space:nowrap} +pre{color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;line-height:1.45;text-rendering:optimizeSpeed} +pre code,pre pre{color:inherit;font-size:inherit;line-height:inherit} +pre>code{display:block} +pre.nowrap,pre.nowrap pre{white-space:pre;word-wrap:normal} em em{font-style:normal} strong strong{font-weight:400} .keyseq{color:rgba(51,51,51,.8)} -kbd{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;display:inline-block;color:rgba(0,0,0,.8);font-size:.65em;line-height:1.45;background-color:#f7f7f7;border:1px solid #ccc;-webkit-border-radius:3px;border-radius:3px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em white inset;box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em #fff inset;margin:0 .15em;padding:.2em .5em;vertical-align:middle;position:relative;top:-.1em;white-space:nowrap} +kbd{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;display:inline-block;color:rgba(0,0,0,.8);font-size:.65em;line-height:1.45;background:#f7f7f7;border:1px solid #ccc;-webkit-border-radius:3px;border-radius:3px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em white inset;box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em #fff inset;margin:0 .15em;padding:.2em .5em;vertical-align:middle;position:relative;top:-.1em;white-space:nowrap} .keyseq kbd:first-child{margin-left:0} .keyseq kbd:last-child{margin-right:0} -.menuseq,.menu{color:rgba(0,0,0,.8)} -b.button:before,b.button:after{position:relative;top:-1px;font-weight:400} -b.button:before{content:"[";padding:0 3px 0 2px} -b.button:after{content:"]";padding:0 2px 0 3px} +.menuseq,.menuref{color:#000} +.menuseq b:not(.caret),.menuref{font-weight:inherit} +.menuseq{word-spacing:-.02em} +.menuseq b.caret{font-size:1.25em;line-height:.8} +.menuseq i.caret{font-weight:bold;text-align:center;width:.45em} +b.button::before,b.button::after{position:relative;top:-1px;font-weight:400} +b.button::before{content:"[";padding:0 3px 0 2px} +b.button::after{content:"]";padding:0 2px 0 3px} p a>code:hover{color:rgba(0,0,0,.9)} #header,#content,#footnotes,#footer{width:100%;margin-left:auto;margin-right:auto;margin-top:0;margin-bottom:0;max-width:62.5em;*zoom:1;position:relative;padding-left:.9375em;padding-right:.9375em} -#header:before,#header:after,#content:before,#content:after,#footnotes:before,#footnotes:after,#footer:before,#footer:after{content:" ";display:table} 
-#header:after,#content:after,#footnotes:after,#footer:after{clear:both} +#header::before,#header::after,#content::before,#content::after,#footnotes::before,#footnotes::after,#footer::before,#footer::after{content:" ";display:table} +#header::after,#content::after,#footnotes::after,#footer::after{clear:both} #content{margin-top:1.25em} -#content:before{content:none} +#content::before{content:none} #header>h1:first-child{color:rgba(0,0,0,.85);margin-top:2.25rem;margin-bottom:0} -#header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #ddddd8} -#header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #ddddd8;padding-bottom:8px} -#header .details{border-bottom:1px solid #ddddd8;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:-ms-flexbox;display:-webkit-flex;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap} +#header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #dddddf} +#header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #dddddf;padding-bottom:8px} +#header .details{border-bottom:1px solid #dddddf;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:-ms-flexbox;display:-webkit-flex;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap} #header .details span:first-child{margin-left:-.125em} #header .details span.email a{color:rgba(0,0,0,.85)} #header .details br{display:none} -#header .details br+span:before{content:"\00a0\2013\00a0"} -#header .details br+span.author:before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)} -#header .details br+span#revremark:before{content:"\00a0|\00a0"} +#header .details br+span::before{content:"\00a0\2013\00a0"} +#header .details br+span.author::before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)} +#header .details br+span#revremark::before{content:"\00a0|\00a0"} #header #revnumber{text-transform:capitalize} -#header #revnumber:after{content:"\00a0"} -#content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #ddddd8;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem} -#toc{border-bottom:1px solid #efefed;padding-bottom:.5em} +#header #revnumber::after{content:"\00a0"} +#content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #dddddf;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem} +#toc{border-bottom:1px solid #e7e7e9;padding-bottom:.5em} #toc>ul{margin-left:.125em} #toc ul.sectlevel0>li>a{font-style:italic} #toc ul.sectlevel0 ul.sectlevel1{margin:.5em 0} @@ -153,16 +154,16 @@ #toc a{text-decoration:none} #toc a:active{text-decoration:underline} #toctitle{color:#7a2518;font-size:1.2em} -@media only screen and (min-width:768px){#toctitle{font-size:1.375em} +@media screen and (min-width:768px){#toctitle{font-size:1.375em} body.toc2{padding-left:15em;padding-right:0} -#toc.toc2{margin-top:0!important;background-color:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #efefed;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto} +#toc.toc2{margin-top:0!important;background:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #e7e7e9;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto} #toc.toc2 #toctitle{margin-top:0;margin-bottom:.8rem;font-size:1.2em} 
#toc.toc2>ul{font-size:.9em;margin-bottom:0} #toc.toc2 ul ul{margin-left:0;padding-left:1em} #toc.toc2 ul.sectlevel0 ul.sectlevel1{padding-left:0;margin-top:.5em;margin-bottom:.5em} body.toc2.toc-right{padding-left:0;padding-right:15em} -body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #efefed;left:auto;right:0}} -@media only screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0} +body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #e7e7e9;left:auto;right:0}} +@media screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0} #toc.toc2{width:20em} #toc.toc2 #toctitle{font-size:1.375em} #toc.toc2>ul{font-size:.95em} @@ -171,89 +172,101 @@ #content #toc{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px} #content #toc>:first-child{margin-top:0} #content #toc>:last-child{margin-bottom:0} -#footer{max-width:100%;background-color:rgba(0,0,0,.8);padding:1.25em} +#footer{max-width:100%;background:rgba(0,0,0,.8);padding:1.25em} #footer-text{color:rgba(255,255,255,.8);line-height:1.44} +#content{margin-bottom:.625em} .sect1{padding-bottom:.625em} -@media only screen and (min-width:768px){.sect1{padding-bottom:1.25em}} -.sect1+.sect1{border-top:1px solid #efefed} +@media screen and (min-width:768px){#content{margin-bottom:1.25em} +.sect1{padding-bottom:1.25em}} +.sect1:last-child{padding-bottom:0} +.sect1+.sect1{border-top:1px solid #e7e7e9} #content h1>a.anchor,h2>a.anchor,h3>a.anchor,#toctitle>a.anchor,.sidebarblock>.content>.title>a.anchor,h4>a.anchor,h5>a.anchor,h6>a.anchor{position:absolute;z-index:1001;width:1.5ex;margin-left:-1.5ex;display:block;text-decoration:none!important;visibility:hidden;text-align:center;font-weight:400} -#content h1>a.anchor:before,h2>a.anchor:before,h3>a.anchor:before,#toctitle>a.anchor:before,.sidebarblock>.content>.title>a.anchor:before,h4>a.anchor:before,h5>a.anchor:before,h6>a.anchor:before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em} +#content h1>a.anchor::before,h2>a.anchor::before,h3>a.anchor::before,#toctitle>a.anchor::before,.sidebarblock>.content>.title>a.anchor::before,h4>a.anchor::before,h5>a.anchor::before,h6>a.anchor::before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em} #content h1:hover>a.anchor,#content h1>a.anchor:hover,h2:hover>a.anchor,h2>a.anchor:hover,h3:hover>a.anchor,#toctitle:hover>a.anchor,.sidebarblock>.content>.title:hover>a.anchor,h3>a.anchor:hover,#toctitle>a.anchor:hover,.sidebarblock>.content>.title>a.anchor:hover,h4:hover>a.anchor,h4>a.anchor:hover,h5:hover>a.anchor,h5>a.anchor:hover,h6:hover>a.anchor,h6>a.anchor:hover{visibility:visible} #content h1>a.link,h2>a.link,h3>a.link,#toctitle>a.link,.sidebarblock>.content>.title>a.link,h4>a.link,h5>a.link,h6>a.link{color:#ba3925;text-decoration:none} #content h1>a.link:hover,h2>a.link:hover,h3>a.link:hover,#toctitle>a.link:hover,.sidebarblock>.content>.title>a.link:hover,h4>a.link:hover,h5>a.link:hover,h6>a.link:hover{color:#a53221} -.audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em} +details,.audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em} +details>summary:first-of-type{cursor:pointer;display:list-item;outline:none;margin-bottom:.75em} .admonitionblock 
td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{text-rendering:optimizeLegibility;text-align:left;font-family:"Noto Serif","DejaVu Serif",serif;font-size:1rem;font-style:italic} -table.tableblock>caption.title{white-space:nowrap;overflow:visible;max-width:0} -.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{color:rgba(0,0,0,.85)} -table.tableblock #preamble>.sectionbody>.paragraph:first-of-type p{font-size:inherit} +table.tableblock.fit-content>caption.title{white-space:nowrap;width:0} +.paragraph.lead>p,#preamble>.sectionbody>[class="paragraph"]:first-of-type p{font-size:1.21875em;line-height:1.6;color:rgba(0,0,0,.85)} +table.tableblock #preamble>.sectionbody>[class="paragraph"]:first-of-type p{font-size:inherit} .admonitionblock>table{border-collapse:separate;border:0;background:none;width:100%} .admonitionblock>table td.icon{text-align:center;width:80px} .admonitionblock>table td.icon img{max-width:none} .admonitionblock>table td.icon .title{font-weight:bold;font-family:"Open Sans","DejaVu Sans",sans-serif;text-transform:uppercase} -.admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #ddddd8;color:rgba(0,0,0,.6)} +.admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #dddddf;color:rgba(0,0,0,.6)} .admonitionblock>table td.content>:last-child>:last-child{margin-bottom:0} .exampleblock>.content{border-style:solid;border-width:1px;border-color:#e6e6e6;margin-bottom:1.25em;padding:1.25em;background:#fff;-webkit-border-radius:4px;border-radius:4px} .exampleblock>.content>:first-child{margin-top:0} .exampleblock>.content>:last-child{margin-bottom:0} -.sidebarblock{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px} +.sidebarblock{border-style:solid;border-width:1px;border-color:#dbdbd6;margin-bottom:1.25em;padding:1.25em;background:#f3f3f2;-webkit-border-radius:4px;border-radius:4px} .sidebarblock>:first-child{margin-top:0} .sidebarblock>:last-child{margin-bottom:0} .sidebarblock>.content>.title{color:#7a2518;margin-top:0;text-align:center} .exampleblock>.content>:last-child>:last-child,.exampleblock>.content .olist>ol>li:last-child>:last-child,.exampleblock>.content .ulist>ul>li:last-child>:last-child,.exampleblock>.content .qlist>ol>li:last-child>:last-child,.sidebarblock>.content>:last-child>:last-child,.sidebarblock>.content .olist>ol>li:last-child>:last-child,.sidebarblock>.content .ulist>ul>li:last-child>:last-child,.sidebarblock>.content .qlist>ol>li:last-child>:last-child{margin-bottom:0} -.literalblock pre,.listingblock pre:not(.highlight),.listingblock pre[class="highlight"],.listingblock pre[class^="highlight "],.listingblock pre.CodeRay,.listingblock pre.prettyprint{background:#f7f7f8} -.sidebarblock .literalblock pre,.sidebarblock .listingblock pre:not(.highlight),.sidebarblock .listingblock pre[class="highlight"],.sidebarblock .listingblock pre[class^="highlight "],.sidebarblock .listingblock pre.CodeRay,.sidebarblock .listingblock pre.prettyprint{background:#f2f1f1} -.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock 
pre[class]{-webkit-border-radius:4px;border-radius:4px;word-wrap:break-word;padding:1em;font-size:.8125em} -.literalblock pre.nowrap,.literalblock pre[class].nowrap,.listingblock pre.nowrap,.listingblock pre[class].nowrap{overflow-x:auto;white-space:pre;word-wrap:normal} -@media only screen and (min-width:768px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:.90625em}} -@media only screen and (min-width:1280px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:1em}} -.literalblock.output pre{color:#f7f7f8;background-color:rgba(0,0,0,.9)} +.literalblock pre,.listingblock>.content>pre{-webkit-border-radius:4px;border-radius:4px;word-wrap:break-word;overflow-x:auto;padding:1em;font-size:.8125em} +@media screen and (min-width:768px){.literalblock pre,.listingblock>.content>pre{font-size:.90625em}} +@media screen and (min-width:1280px){.literalblock pre,.listingblock>.content>pre{font-size:1em}} +.literalblock pre,.listingblock>.content>pre:not(.highlight),.listingblock>.content>pre[class="highlight"],.listingblock>.content>pre[class^="highlight "]{background:#f7f7f8} +.literalblock.output pre{color:#f7f7f8;background:rgba(0,0,0,.9)} +.listingblock>.content{position:relative} +.listingblock code[data-lang]::before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:inherit;opacity:.5} +.listingblock:hover code[data-lang]::before{display:block} +.listingblock.terminal pre .command::before{content:attr(data-prompt);padding-right:.5em;color:inherit;opacity:.5} +.listingblock.terminal pre .command:not([data-prompt])::before{content:"$"} .listingblock pre.highlightjs{padding:0} .listingblock pre.highlightjs>code{padding:1em;-webkit-border-radius:4px;border-radius:4px} .listingblock pre.prettyprint{border-width:0} -.listingblock>.content{position:relative} -.listingblock code[data-lang]:before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:#999} -.listingblock:hover code[data-lang]:before{display:block} -.listingblock.terminal pre .command:before{content:attr(data-prompt);padding-right:.5em;color:#999} -.listingblock.terminal pre .command:not([data-prompt]):before{content:"$"} -table.pyhltable{border-collapse:separate;border:0;margin-bottom:0;background:none} -table.pyhltable td{vertical-align:top;padding-top:0;padding-bottom:0;line-height:1.45} -table.pyhltable td.code{padding-left:.75em;padding-right:0} -pre.pygments .lineno,table.pyhltable td:not(.code){color:#999;padding-left:0;padding-right:.5em;border-right:1px solid #ddddd8} -pre.pygments .lineno{display:inline-block;margin-right:.25em} -table.pyhltable .linenodiv{background:none!important;padding-right:0!important} +.prettyprint{background:#f7f7f8} +pre.prettyprint .linenums{line-height:1.45;margin-left:2em} +pre.prettyprint li{background:none;list-style-type:inherit;padding-left:0} +pre.prettyprint li code[data-lang]::before{opacity:1} +pre.prettyprint li:not(:first-child) code[data-lang]::before{display:none} +table.linenotable{border-collapse:separate;border:0;margin-bottom:0;background:none} +table.linenotable td[class]{color:inherit;vertical-align:top;padding:0;line-height:inherit;white-space:normal} +table.linenotable td.code{padding-left:.75em} +table.linenotable td.linenos{border-right:1px solid currentColor;opacity:.35;padding-right:.5em} +pre.pygments 
.lineno{border-right:1px solid currentColor;opacity:.35;display:inline-block;margin-right:.75em} +pre.pygments .lineno::before{content:"";margin-right:-.125em} .quoteblock{margin:0 1em 1.25em 1.5em;display:table} -.quoteblock>.title{margin-left:-1.5em;margin-bottom:.75em} -.quoteblock blockquote,.quoteblock blockquote p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify} +.quoteblock:not(.excerpt)>.title{margin-left:-1.5em;margin-bottom:.75em} +.quoteblock blockquote,.quoteblock p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify} .quoteblock blockquote{margin:0;padding:0;border:0} -.quoteblock blockquote:before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)} +.quoteblock blockquote::before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)} .quoteblock blockquote>.paragraph:last-child p{margin-bottom:0} -.quoteblock .attribution{margin-top:.5em;margin-right:.5ex;text-align:right} -.quoteblock .quoteblock{margin-left:0;margin-right:0;padding:.5em 0;border-left:3px solid rgba(0,0,0,.6)} -.quoteblock .quoteblock blockquote{padding:0 0 0 .75em} -.quoteblock .quoteblock blockquote:before{display:none} -.verseblock{margin:0 1em 1.25em 1em} +.quoteblock .attribution{margin-top:.75em;margin-right:.5ex;text-align:right} +.verseblock{margin:0 1em 1.25em} .verseblock pre{font-family:"Open Sans","DejaVu Sans",sans;font-size:1.15rem;color:rgba(0,0,0,.85);font-weight:300;text-rendering:optimizeLegibility} .verseblock pre strong{font-weight:400} .verseblock .attribution{margin-top:1.25rem;margin-left:.5ex} .quoteblock .attribution,.verseblock .attribution{font-size:.9375em;line-height:1.45;font-style:italic} .quoteblock .attribution br,.verseblock .attribution br{display:none} .quoteblock .attribution cite,.verseblock .attribution cite{display:block;letter-spacing:-.025em;color:rgba(0,0,0,.6)} -.quoteblock.abstract{margin:0 0 1.25em 0;display:block} -.quoteblock.abstract blockquote,.quoteblock.abstract blockquote p{text-align:left;word-spacing:0} -.quoteblock.abstract blockquote:before,.quoteblock.abstract blockquote p:first-of-type:before{display:none} +.quoteblock.abstract blockquote::before,.quoteblock.excerpt blockquote::before,.quoteblock .quoteblock blockquote::before{display:none} +.quoteblock.abstract blockquote,.quoteblock.abstract p,.quoteblock.excerpt blockquote,.quoteblock.excerpt p,.quoteblock .quoteblock blockquote,.quoteblock .quoteblock p{line-height:1.6;word-spacing:0} +.quoteblock.abstract{margin:0 1em 1.25em;display:block} +.quoteblock.abstract>.title{margin:0 0 .375em;font-size:1.15em;text-align:center} +.quoteblock.excerpt>blockquote,.quoteblock .quoteblock{padding:0 0 .25em 1em;border-left:.25em solid #dddddf} +.quoteblock.excerpt,.quoteblock .quoteblock{margin-left:0} +.quoteblock.excerpt blockquote,.quoteblock.excerpt p,.quoteblock .quoteblock blockquote,.quoteblock .quoteblock p{color:inherit;font-size:1.0625rem} +.quoteblock.excerpt .attribution,.quoteblock .quoteblock .attribution{color:inherit;text-align:left;margin-right:0} table.tableblock{max-width:100%;border-collapse:separate} -table.tableblock td>.paragraph:last-child p>p:last-child,table.tableblock th>p:last-child,table.tableblock td>p:last-child{margin-bottom:0} 
+p.tableblock:last-child{margin-bottom:0} +td.tableblock>.content>:last-child{margin-bottom:-1.25em} +td.tableblock>.content>:last-child.sidebarblock{margin-bottom:0} table.tableblock,th.tableblock,td.tableblock{border:0 solid #dedede} -table.grid-all th.tableblock,table.grid-all td.tableblock{border-width:0 1px 1px 0} -table.grid-all tfoot>tr>th.tableblock,table.grid-all tfoot>tr>td.tableblock{border-width:1px 1px 0 0} -table.grid-cols th.tableblock,table.grid-cols td.tableblock{border-width:0 1px 0 0} -table.grid-all *>tr>.tableblock:last-child,table.grid-cols *>tr>.tableblock:last-child{border-right-width:0} -table.grid-rows th.tableblock,table.grid-rows td.tableblock{border-width:0 0 1px 0} -table.grid-all tbody>tr:last-child>th.tableblock,table.grid-all tbody>tr:last-child>td.tableblock,table.grid-all thead:last-child>tr>th.tableblock,table.grid-rows tbody>tr:last-child>th.tableblock,table.grid-rows tbody>tr:last-child>td.tableblock,table.grid-rows thead:last-child>tr>th.tableblock{border-bottom-width:0} -table.grid-rows tfoot>tr>th.tableblock,table.grid-rows tfoot>tr>td.tableblock{border-width:1px 0 0 0} +table.grid-all>thead>tr>.tableblock,table.grid-all>tbody>tr>.tableblock{border-width:0 1px 1px 0} +table.grid-all>tfoot>tr>.tableblock{border-width:1px 1px 0 0} +table.grid-cols>*>tr>.tableblock{border-width:0 1px 0 0} +table.grid-rows>thead>tr>.tableblock,table.grid-rows>tbody>tr>.tableblock{border-width:0 0 1px} +table.grid-rows>tfoot>tr>.tableblock{border-width:1px 0 0} +table.grid-all>*>tr>.tableblock:last-child,table.grid-cols>*>tr>.tableblock:last-child{border-right-width:0} +table.grid-all>tbody>tr:last-child>.tableblock,table.grid-all>thead:last-child>tr>.tableblock,table.grid-rows>tbody>tr:last-child>.tableblock,table.grid-rows>thead:last-child>tr>.tableblock{border-bottom-width:0} table.frame-all{border-width:1px} table.frame-sides{border-width:0 1px} -table.frame-topbot{border-width:1px 0} +table.frame-topbot,table.frame-ends{border-width:1px 0} +table.stripes-all tr,table.stripes-odd tr:nth-of-type(odd),table.stripes-even tr:nth-of-type(even),table.stripes-hover tr:hover{background:#f8f8f7} th.halign-left,td.halign-left{text-align:left} th.halign-right,td.halign-right{text-align:right} th.halign-center,td.halign-center{text-align:center} @@ -265,19 +278,19 @@ tbody tr th,tbody tr th p,tfoot tr th,tfoot tr th p{color:rgba(0,0,0,.8);font-weight:bold} p.tableblock>code:only-child{background:none;padding:0} p.tableblock{font-size:1em} -td>div.verse{white-space:pre} ol{margin-left:1.75em} ul li ol{margin-left:1.5em} dl dd{margin-left:1.125em} dl dd:last-child,dl dd:last-child>:last-child{margin-bottom:0} ol>li p,ul>li p,ul dd,ol dd,.olist .olist,.ulist .ulist,.ulist .olist,.olist .ulist{margin-bottom:.625em} -ul.unstyled,ol.unnumbered,ul.checklist,ul.none{list-style-type:none} -ul.unstyled,ol.unnumbered,ul.checklist{margin-left:.625em} -ul.checklist li>p:first-child>.fa-square-o:first-child,ul.checklist li>p:first-child>.fa-check-square-o:first-child{width:1em;font-size:.85em} -ul.checklist li>p:first-child>input[type="checkbox"]:first-child{width:1em;position:relative;top:1px} -ul.inline{margin:0 auto .625em auto;margin-left:-1.375em;margin-right:0;padding:0;list-style:none;overflow:hidden} -ul.inline>li{list-style:none;float:left;margin-left:1.375em;display:block} -ul.inline>li>*{display:block} +ul.checklist,ul.none,ol.none,ul.no-bullet,ol.no-bullet,ol.unnumbered,ul.unstyled,ol.unstyled{list-style-type:none} +ul.no-bullet,ol.no-bullet,ol.unnumbered{margin-left:.625em} 
+ul.unstyled,ol.unstyled{margin-left:0} +ul.checklist{margin-left:.625em} +ul.checklist li>p:first-child>.fa-square-o:first-child,ul.checklist li>p:first-child>.fa-check-square-o:first-child{width:1.25em;font-size:.8em;position:relative;bottom:.125em} +ul.checklist li>p:first-child>input[type="checkbox"]:first-child{margin-right:.25em} +ul.inline{display:-ms-flexbox;display:-webkit-box;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap;list-style:none;margin:0 0 .625em -1.25em} +ul.inline>li{margin-left:1.25em} .unstyled dl dt{font-weight:400;font-style:normal} ol.arabic{list-style-type:decimal} ol.decimal{list-style-type:decimal-leading-zero} @@ -291,11 +304,12 @@ td.hdlist1,td.hdlist2{vertical-align:top;padding:0 .625em} td.hdlist1{font-weight:bold;padding-bottom:1.25em} .literalblock+.colist,.listingblock+.colist{margin-top:-.5em} -.colist>table tr>td:first-of-type{padding:0 .75em;line-height:1} -.colist>table tr>td:last-of-type{padding:.25em 0} +.colist td:not([class]):first-child{padding:.4em .75em 0;line-height:1;vertical-align:top} +.colist td:not([class]):first-child img{max-width:none} +.colist td:not([class]):last-child{padding:.25em 0} .thumb,.th{line-height:0;display:inline-block;border:solid 4px #fff;-webkit-box-shadow:0 0 0 1px #ddd;box-shadow:0 0 0 1px #ddd} -.imageblock.left,.imageblock[style*="float: left"]{margin:.25em .625em 1.25em 0} -.imageblock.right,.imageblock[style*="float: right"]{margin:.25em 0 1.25em .625em} +.imageblock.left{margin:.25em .625em 1.25em 0} +.imageblock.right{margin:.25em 0 1.25em .625em} .imageblock>.title{margin-bottom:0} .imageblock.thumb,.imageblock.th{border-width:6px} .imageblock.thumb>.title,.imageblock.th>.title{padding:0 .125em} @@ -308,9 +322,9 @@ sup.footnote a,sup.footnoteref a{text-decoration:none} sup.footnote a:active,sup.footnoteref a:active{text-decoration:underline} #footnotes{padding-top:.75em;padding-bottom:.75em;margin-bottom:.625em} -#footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em 0;border-width:1px 0 0 0} -#footnotes .footnote{padding:0 .375em 0 .225em;line-height:1.3334;font-size:.875em;margin-left:1.2em;text-indent:-1.05em;margin-bottom:.2em} -#footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none} +#footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em;border-width:1px 0 0} +#footnotes .footnote{padding:0 .375em 0 .225em;line-height:1.3334;font-size:.875em;margin-left:1.2em;margin-bottom:.2em} +#footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none;margin-left:-1.05em} #footnotes .footnote:last-of-type{margin-bottom:0} #content #footnotes{margin-top:-.625em;margin-bottom:0;padding:.75em 0} .gist .file-data>table{border:0;background:#fff;width:100%;margin-bottom:0} @@ -322,48 +336,49 @@ .overline{text-decoration:overline} .line-through{text-decoration:line-through} .aqua{color:#00bfbf} -.aqua-background{background-color:#00fafa} +.aqua-background{background:#00fafa} .black{color:#000} -.black-background{background-color:#000} +.black-background{background:#000} .blue{color:#0000bf} -.blue-background{background-color:#0000fa} +.blue-background{background:#0000fa} .fuchsia{color:#bf00bf} -.fuchsia-background{background-color:#fa00fa} +.fuchsia-background{background:#fa00fa} .gray{color:#606060} -.gray-background{background-color:#7d7d7d} +.gray-background{background:#7d7d7d} .green{color:#006000} -.green-background{background-color:#007d00} +.green-background{background:#007d00} .lime{color:#00bf00} 
-.lime-background{background-color:#00fa00} +.lime-background{background:#00fa00} .maroon{color:#600000} -.maroon-background{background-color:#7d0000} +.maroon-background{background:#7d0000} .navy{color:#000060} -.navy-background{background-color:#00007d} +.navy-background{background:#00007d} .olive{color:#606000} -.olive-background{background-color:#7d7d00} +.olive-background{background:#7d7d00} .purple{color:#600060} -.purple-background{background-color:#7d007d} +.purple-background{background:#7d007d} .red{color:#bf0000} -.red-background{background-color:#fa0000} +.red-background{background:#fa0000} .silver{color:#909090} -.silver-background{background-color:#bcbcbc} +.silver-background{background:#bcbcbc} .teal{color:#006060} -.teal-background{background-color:#007d7d} +.teal-background{background:#007d7d} .white{color:#bfbfbf} -.white-background{background-color:#fafafa} +.white-background{background:#fafafa} .yellow{color:#bfbf00} -.yellow-background{background-color:#fafa00} +.yellow-background{background:#fafa00} span.icon>.fa{cursor:default} +a span.icon>.fa{cursor:inherit} .admonitionblock td.icon [class^="fa icon-"]{font-size:2.5em;text-shadow:1px 1px 2px rgba(0,0,0,.5);cursor:default} -.admonitionblock td.icon .icon-note:before{content:"\f05a";color:#19407c} -.admonitionblock td.icon .icon-tip:before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111} -.admonitionblock td.icon .icon-warning:before{content:"\f071";color:#bf6900} -.admonitionblock td.icon .icon-caution:before{content:"\f06d";color:#bf3400} -.admonitionblock td.icon .icon-important:before{content:"\f06a";color:#bf0000} -.conum[data-value]{display:inline-block;color:#fff!important;background-color:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} +.admonitionblock td.icon .icon-note::before{content:"\f05a";color:#19407c} +.admonitionblock td.icon .icon-tip::before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111} +.admonitionblock td.icon .icon-warning::before{content:"\f071";color:#bf6900} +.admonitionblock td.icon .icon-caution::before{content:"\f06d";color:#bf3400} +.admonitionblock td.icon .icon-important::before{content:"\f06a";color:#bf0000} +.conum[data-value]{display:inline-block;color:#fff!important;background:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} .conum[data-value] *{color:#fff!important} .conum[data-value]+b{display:none} -.conum[data-value]:after{content:attr(data-value)} +.conum[data-value]::after{content:attr(data-value)} pre .conum[data-value]{position:relative;top:-.125em} b.conum *{color:inherit!important} .conum:not([data-value]):empty{display:none} @@ -373,36 +388,39 @@ p,blockquote,dt,td.content,span.alt{font-size:1.0625rem} p{margin-bottom:1.25rem} .sidebarblock p,.sidebarblock dt,.sidebarblock td.content,p.tableblock{font-size:1em} -.exampleblock>.content{background-color:#fffef7;border-color:#e0e0dc;-webkit-box-shadow:0 1px 4px #e0e0dc;box-shadow:0 1px 4px #e0e0dc} +.exampleblock>.content{background:#fffef7;border-color:#e0e0dc;-webkit-box-shadow:0 1px 4px #e0e0dc;box-shadow:0 1px 4px #e0e0dc} .print-only{display:none!important} -@media print{@page{margin:1.25cm .75cm} 
-*{-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important} +@page{margin:1.25cm .75cm} +@media print{*{-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important} +html{font-size:80%} a{color:inherit!important;text-decoration:underline!important} a.bare,a[href^="#"],a[href^="mailto:"]{text-decoration:none!important} -a[href^="http:"]:not(.bare):after,a[href^="https:"]:not(.bare):after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em} -abbr[title]:after{content:" (" attr(title) ")"} +a[href^="http:"]:not(.bare)::after,a[href^="https:"]:not(.bare)::after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em} +abbr[title]::after{content:" (" attr(title) ")"} pre,blockquote,tr,img,object,svg{page-break-inside:avoid} thead{display:table-header-group} svg{max-width:100%} p,blockquote,dt,td.content{font-size:1em;orphans:3;widows:3} h2,h3,#toctitle,.sidebarblock>.content>.title{page-break-after:avoid} #toc,.sidebarblock,.exampleblock>.content{background:none!important} -#toc{border-bottom:1px solid #ddddd8!important;padding-bottom:0!important} -.sect1{padding-bottom:0!important} -.sect1+.sect1{border:0!important} -#header>h1:first-child{margin-top:1.25rem} +#toc{border-bottom:1px solid #dddddf!important;padding-bottom:0!important} body.book #header{text-align:center} -body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em 0} +body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em} body.book #header .details{border:0!important;display:block;padding:0!important} body.book #header .details span:first-child{margin-left:0!important} body.book #header .details br{display:block} -body.book #header .details br+span:before{content:none!important} +body.book #header .details br+span::before{content:none!important} body.book #toc{border:0!important;text-align:left!important;padding:0!important;margin:0!important} body.book #toc,body.book #preamble,body.book h1.sect0,body.book .sect1>h2{page-break-before:always} -.listingblock code[data-lang]:before{display:block} -#footer{background:none!important;padding:0 .9375em} -#footer-text{color:rgba(0,0,0,.6)!important;font-size:.9em} +.listingblock code[data-lang]::before{display:block} +#footer{padding:0 .9375em} .hide-on-print{display:none!important} .print-only{display:block!important} .hide-for-print{display:none!important} .show-for-print{display:inherit!important}} +@media print,amzn-kf8{#header>h1:first-child{margin-top:1.25rem} +.sect1{padding:0!important} +.sect1+.sect1{border:0} +#footer{background:none} +#footer-text{color:rgba(0,0,0,.6);font-size:.9em}} +@media amzn-kf8{#header,#content,#footnotes,#footer{padding:0}} diff -Nru asciidoctor-1.5.5/data/stylesheets/coderay-asciidoctor.css asciidoctor-2.0.10/data/stylesheets/coderay-asciidoctor.css --- asciidoctor-1.5.5/data/stylesheets/coderay-asciidoctor.css 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/data/stylesheets/coderay-asciidoctor.css 2019-08-18 16:11:54.000000000 +0000 @@ -1,14 +1,12 @@ /* Stylesheet for CodeRay to match GitHub theme | MIT License | http://foundation.zurb.com */ -/*pre.CodeRay {background-color:#f7f7f8;}*/ -.CodeRay .line-numbers{border-right:1px solid #d8d8d8;padding:0 0.5em 0 .25em} -.CodeRay span.line-numbers{display:inline-block;margin-right:.5em;color:rgba(0,0,0,.3)} -.CodeRay .line-numbers strong{color:rgba(0,0,0,.4)} 
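The CodeRay stylesheet touched here styles highlighted source blocks; in the 2.x series Rouge is supported as well, which is why ruby-rouge appears as a new dependency later in this diff. A hedged sketch of enabling a highlighter through the API, assuming the corresponding gem (coderay or rouge) is installed and doc.adoc is an illustrative input file:

[,ruby]
----
require 'asciidoctor'

# Enable syntax highlighting of source blocks; the named highlighter
# gem must be available at conversion time.
Asciidoctor.convert_file 'doc.adoc',
  safe: :safe,
  attributes: { 'source-highlighter' => 'rouge' }
----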
-table.CodeRay{border-collapse:separate;border-spacing:0;margin-bottom:0;border:0;background:none} -table.CodeRay td{vertical-align: top;line-height:1.45} +pre.CodeRay{background:#f7f7f8} +.CodeRay .line-numbers{border-right:1px solid currentColor;opacity:.35;padding:0 .5em 0 0} +.CodeRay span.line-numbers{display:inline-block;margin-right:.75em} +.CodeRay .line-numbers strong{color:#000} +table.CodeRay{border-collapse:separate;border:0;margin-bottom:0;background:none} +table.CodeRay td{vertical-align:top;line-height:inherit} table.CodeRay td.line-numbers{text-align:right} -table.CodeRay td.line-numbers>pre{padding:0;color:rgba(0,0,0,.3)} -table.CodeRay td.code{padding:0 0 0 .5em} -table.CodeRay td.code>pre{padding:0} +table.CodeRay td.code{padding:0 0 0 .75em} .CodeRay .debug{color:#fff !important;background:#000080 !important} .CodeRay .annotation{color:#007} .CodeRay .attribute-name{color:#000080} diff -Nru asciidoctor-1.5.5/debian/asciidoctor.install asciidoctor-2.0.10/debian/asciidoctor.install --- asciidoctor-1.5.5/debian/asciidoctor.install 2017-07-02 09:31:04.000000000 +0000 +++ asciidoctor-2.0.10/debian/asciidoctor.install 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -data/* usr/share/asciidoctor diff -Nru asciidoctor-1.5.5/debian/changelog asciidoctor-2.0.10/debian/changelog --- asciidoctor-1.5.5/debian/changelog 2021-06-04 18:02:56.000000000 +0000 +++ asciidoctor-2.0.10/debian/changelog 2022-12-29 22:24:13.000000000 +0000 @@ -1,8 +1,103 @@ -asciidoctor (1.5.5-1~16.04.sav0) xenial; urgency=medium +asciidoctor (2.0.10-2~16.04.sav0) xenial; urgency=medium * Backport to Xenial + * debian/control: Set debhelper-compat (= 10) BD (LP highest for Xenial) - -- Rob Savoury Fri, 04 Jun 2021 11:02:56 -0700 + -- Rob Savoury Thu, 29 Dec 2022 14:24:13 -0800 + +asciidoctor (2.0.10-2) unstable; urgency=medium + + * Patch to fix the autoload of the syntax_highlighter (Closes: #935196) + + -- Joseph Herlant Tue, 20 Aug 2019 15:53:25 -0700 + +asciidoctor (2.0.10-1) unstable; urgency=medium + + * New upstream version 2.0.10 (Closes: #934998) + * d/watch: move to use github as source + * Refresh the patch list for the new version + * Fix issue with missing test suite from gem and add autopkgtest cases + * d/control: + * New dependency on ruby-rouge + * bump compat to 12 + * bump standards to 4.4.0.1 + + [ Utkarsh Gupta ] + * Add salsa-ci.yml + + -- Joseph Herlant Mon, 19 Aug 2019 15:43:53 -0700 + +asciidoctor (1.5.8-1) unstable; urgency=medium + + * New upstream version 1.5.8 + + CVE-2018-18385: fix infinite loop in Parser#next_block (Closes: #913892) + * Refresh patches for new version + * Fix Timezone tests when SOURCE_DATE_EPOCH is set. + * d/control: bump standards to 4.2.1 + * d/control: ruby-thread-safe has been dropped from upstream + in favor of ruby-concurrent + * d/control: add dependency version from ruby-asciidoctor to the ruby pkg + * Switch the architecture of asciidoctor back to all (Closes: #909105) + * Set asciidoctor-doc Multi-Arch: foreign + * Update my email to my new debian one + * Use the new debhelper-compat(=11) notation and drop d/compat. 
+ + -- Joseph Herlant Thu, 22 Nov 2018 21:43:15 -0800 + +asciidoctor (1.5.7.1-1) unstable; urgency=low + + * New upstream version 1.5.7.1 + * Remove lib_directory_in_tests.patch integrated upstream + * Remove package-version.patch integrated upstream + * Refresh quilt patches on new version + + -- Joseph Herlant Sun, 13 May 2018 16:43:20 -0700 + +asciidoctor (1.5.7-1) unstable; urgency=low + + * New upstream version 1.5.7 (Closes: #895186, #895187) + * remove fix-manpage-topic.patch merged upstream + * remove re-add_README.patch merged upstream + * remove skip-asciimath-test.patch as obsolete thanks to upstream change + * refresh quilt patches against the new version + * d/rules: use ASCIIDOCTOR_MANPAGE_PATH to avoid issues during the tests + * d/ruby-tests.rake: add ASCIIDOCTOR_MANPAGE_PATH to autopkgtests + * Update the privacy breach patch with the latest version requirements + * Change the lib_directory_in_tests.patch to be forwardable upstream + * Make package-version.patch forwardable to upstream + + -- Joseph Herlant Sun, 06 May 2018 01:14:29 -0700 + +asciidoctor (1.5.6.2-2) unstable; urgency=low + + * Force the install of data files even when DH_CONFIG_ACT_ON_PACKAGES is set + (Closes: #895613) + + -- Joseph Herlant Fri, 13 Apr 2018 08:33:20 -0700 + +asciidoctor (1.5.6.2-1) unstable; urgency=low + + * New upstream version 1.5.6.2 (Closes: #884213) + * d/control: add myself as co-maintainer to avoid nmu warnings + * d/control, d/compat: update standards to 4.1.4 and compat to 11 with it + * d/control: update Vcs-* after migration to Salsa + * d/watch: fix debian-watch-uses-insecure-uri + * d/copyright: fix insecure-copyright-format-uri + * Update existing patches to match the new version + * Add patch to workaround an upstream bug removing the README.adoc + * Fix failing test when getting help on the manpage topic + * split asciidoctor into ruby-asciidoctor (Closes: #893467) + * d/control: update package description + * d/rules: disable webfonts to fix privacy-breach-generic + * Update patch to fix the manpage topic test + * d/control: move asciidoctor to section:text + * wrap-and-sort + * d/rules: fix duplicated-compressed-files lintian complaint + * d/rules: disable icons and image-uri-screenshot in README generation + * Add patch to remove the badge in the README to fix privacy-breach-generic + * d/control: update package short description + + -- Joseph Herlant Wed, 11 Apr 2018 15:52:00 -0700 asciidoctor (1.5.5-1) unstable; urgency=medium diff -Nru asciidoctor-1.5.5/debian/compat asciidoctor-2.0.10/debian/compat --- asciidoctor-1.5.5/debian/compat 2017-07-02 09:29:53.000000000 +0000 +++ asciidoctor-2.0.10/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -9 diff -Nru asciidoctor-1.5.5/debian/control asciidoctor-2.0.10/debian/control --- asciidoctor-1.5.5/debian/control 2017-07-02 09:31:48.000000000 +0000 +++ asciidoctor-2.0.10/debian/control 2022-12-29 22:24:09.000000000 +0000 @@ -1,11 +1,12 @@ Source: asciidoctor -Section: ruby +Section: text Priority: optional Maintainer: Debian Ruby Extras Maintainers Uploaders: Per Andersson , - Guillaume Grossetie + Guillaume Grossetie , + Joseph Herlant Build-Depends: coderay, - debhelper (>= 9~), + debhelper-compat (= 10), erubis, gem2deb, locales, @@ -14,20 +15,35 @@ ruby-htmlentities, ruby-mocha, ruby-nokogiri (>= 1.5.10), + ruby-rouge, ruby-slim, - ruby-thread-safe, + ruby-concurrent, ruby-tilt -Standards-Version: 3.9.8 -Vcs-Git: https://anonscm.debian.org/git/pkg-ruby-extras/asciidoctor.git -Vcs-Browser: 
https://anonscm.debian.org/cgit/pkg-ruby-extras/asciidoctor.git +Standards-Version: 4.4.0.1 +Vcs-Git: https://salsa.debian.org/ruby-team/asciidoctor.git +Vcs-Browser: https://salsa.debian.org/ruby-team/asciidoctor Homepage: http://asciidoctor.org -Testsuite: autopkgtest-pkg-ruby XS-Ruby-Versions: all +Package: ruby-asciidoctor +Section: ruby +Architecture: all +Replaces: asciidoctor (<< 1.5.6.2-1) +Breaks: asciidoctor (<< 1.5.6.2-1) +XB-Ruby-Versions: ${ruby:Versions} +Depends: ruby | ruby-interpreter, ${misc:Depends}, ${shlibs:Depends} +Description: AsciiDoc to HTML rendering for Ruby (core libraries) + Asciidoctor is a pure Ruby processor for converting AsciiDoc source files and + strings into HTML 5, DocBook 4.5, DocBook 5.0 and other formats. + . + This package contains the library files used by the asciidoctor package. + Package: asciidoctor Architecture: all +Multi-Arch: foreign XB-Ruby-Versions: ${ruby:Versions} Depends: ruby | ruby-interpreter, + ruby-asciidoctor (>= ${source:Version}), ${misc:Depends}, ${shlibs:Depends} Description: AsciiDoc to HTML rendering for Ruby @@ -36,9 +52,9 @@ Package: asciidoctor-doc Architecture: all +Multi-Arch: foreign Section: doc -Depends: libjs-jquery, - ${misc:Depends} +Depends: libjs-jquery, ${misc:Depends} Suggests: asciidoctor Description: AsciiDoc to HTML rendering for Ruby (documentation) Asciidoctor is a pure Ruby processor for converting AsciiDoc source files and diff -Nru asciidoctor-1.5.5/debian/copyright asciidoctor-2.0.10/debian/copyright --- asciidoctor-1.5.5/debian/copyright 2017-07-02 09:29:53.000000000 +0000 +++ asciidoctor-2.0.10/debian/copyright 2019-08-18 16:19:20.000000000 +0000 @@ -1,4 +1,4 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: asciidoctor Source: http://asciidoctor.org/ diff -Nru asciidoctor-1.5.5/debian/patches/data_path.patch asciidoctor-2.0.10/debian/patches/data_path.patch --- asciidoctor-1.5.5/debian/patches/data_path.patch 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/data_path.patch 2019-08-18 16:19:20.000000000 +0000 @@ -1,14 +1,15 @@ From: Guillaume Grossetie Subject: Load data from /usr/share/asciidoctor path Bug-Debian: https://bugs.debian.org/788052 +Last-update: 2019-06-11 --- a/lib/asciidoctor.rb +++ b/lib/asciidoctor.rb -@@ -175,7 +175,7 @@ module Asciidoctor - LIB_PATH = ::File.join ROOT_PATH, 'lib' +@@ -182,7 +182,7 @@ + LIB_DIR = ::File.join ROOT_DIR, 'lib' - # The absolute data path of the Asciidoctor RubyGem -- DATA_PATH = ::File.join ROOT_PATH, 'data' -+ DATA_PATH = ::File.exist?(asciidoctor_lib_path = ::File.join(ROOT_PATH, 'data')) ? asciidoctor_lib_path : ::File.join(ROOT_PATH, '../../share/asciidoctor') + # The absolute data directory of the Asciidoctor RubyGem +- DATA_DIR = ::File.join ROOT_DIR, 'data' ++ DATA_DIR = ::File.exist?(asciidoctor_lib_path = ::File.join(ROOT_DIR, 'data')) ? 
asciidoctor_lib_path : ::File.join(ROOT_DIR, '../../share/ruby-asciidoctor') # The user's home directory, as best we can determine it - # NOTE not using infix rescue for performance reasons, see: https://github.com/jruby/jruby/issues/1816 + # IMPORTANT this rescue is required for running Asciidoctor on GitHub.com diff -Nru asciidoctor-1.5.5/debian/patches/lib_directory_in_tests.patch asciidoctor-2.0.10/debian/patches/lib_directory_in_tests.patch --- asciidoctor-1.5.5/debian/patches/lib_directory_in_tests.patch 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/lib_directory_in_tests.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -From: Guillaume Grossetie -Subject: Exclude lib/ directory from tests ---- a/Rakefile -+++ b/Rakefile -@@ -20,7 +20,7 @@ begin - Rake::TestTask.new(:test) do |test| - prepare_test_env - puts %(LANG: #{ENV['LANG']}) if ENV.key? 'TRAVIS_BUILD_ID' -- test.libs << 'test' -+ test.libs = ['test'] - test.pattern = 'test/**/*_test.rb' - test.verbose = true - test.warning = true ---- a/test/test_helper.rb -+++ b/test/test_helper.rb -@@ -8,7 +8,7 @@ end - - require 'simplecov' if ENV['COVERAGE'] == 'true' - --require File.join(ASCIIDOCTOR_PROJECT_DIR, 'lib', 'asciidoctor') -+require 'asciidoctor' - - require 'socket' - require 'nokogiri' diff -Nru asciidoctor-1.5.5/debian/patches/package-version.patch asciidoctor-2.0.10/debian/patches/package-version.patch --- asciidoctor-1.5.5/debian/patches/package-version.patch 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/package-version.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Description: gemspec: load asciidoctor/version from system is not available locally - This fixes running tests under autopkgtest -Author: Antonio Terceiro -Origin: vendor -Forwarded: no -Last-Update: 2015-11-17 ---- -This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ ---- a/asciidoctor.gemspec -+++ b/asciidoctor.gemspec -@@ -1,5 +1,6 @@ - # -*- encoding: utf-8 -*- --require File.expand_path '../lib/asciidoctor/version', __FILE__ -+$LOAD_PATH << File.expand_path('../lib', __FILE__) -+require 'asciidoctor/version' - require 'open3' unless defined? Open3 - - Gem::Specification.new do |s| ---- a/Rakefile -+++ b/Rakefile -@@ -1,4 +1,5 @@ --require File.expand_path '../lib/asciidoctor/version', __FILE__ -+$LOAD_PATH << File.expand_path('../lib', __FILE__) -+require 'asciidoctor/version' - - def prepare_test_env - # rather than hardcoding gc settings in test task, diff -Nru asciidoctor-1.5.5/debian/patches/privacy_breach.patch asciidoctor-2.0.10/debian/patches/privacy_breach.patch --- asciidoctor-1.5.5/debian/patches/privacy_breach.patch 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/privacy_breach.patch 2019-08-18 21:46:44.000000000 +0000 @@ -0,0 +1,57 @@ +Description: Remove the badge in the readme to avoid lintian complaining about + privacy-breach-generic +Author: Joseph Herlant +Forwarded: not-needed +Last-Update: 2018-05-05 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/README.adoc ++++ b/README.adoc +@@ -442,7 +442,6 @@ + But we can't do it without your feedback! + We encourage you to ask questions and discuss any aspects of the project on the discussion list, on Twitter or in the chat room. 
+ +-Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] + Discussion list (Nabble):: {uri-discuss} + Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] hashtag or https://twitter.com/asciidoctor[@asciidoctor] mention + +--- a/README-fr.adoc ++++ b/README-fr.adoc +@@ -401,7 +401,6 @@ + Mais nous ne pouvons pas le faire sans vos avis ! + Nous vous encourageons à poser vos questions et à discuter de n'importe quels aspects du projet sur la liste de discussion, Twitter ou dans le salon de discussion. + +-Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] + Forum (Nabble):: {uri-discuss} + Twitter:: hashtag https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] ou la mention https://twitter.com/asciidoctor[@asciidoctor] + //// +--- a/README-zh_CN.adoc ++++ b/README-zh_CN.adoc +@@ -395,7 +395,6 @@ + + 讨论组 (Nabble):: {uri-discuss} + Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] 来加入话题 或 https://twitter.com/asciidoctor[@asciidoctor] at并提醒我们 +-聊天 (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] + + ifdef::env-github[] + Further information and documentation about Asciidoctor can be found on the project's website. +--- a/README-de.adoc ++++ b/README-de.adoc +@@ -435,7 +435,6 @@ + Aber wir können es nicht ohne ihr Feedback machen! + Wir ermutigen Sie, Fragen zu stellen und alle Aspekte des Projekts auf der Diskussionsliste, auf Twitter oder im Chatroom zu diskutieren. + +-Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] + Discussionsliste (Nabble):: {uri-discuss} + Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] hashtag or https://twitter.com/asciidoctor[@asciidoctor] mention + +--- a/README-jp.adoc ++++ b/README-jp.adoc +@@ -442,7 +442,6 @@ + しかしあなたからのフィードバックがなくてはAsciidoctorの開発は進みません! + ディスカッションリスト, Twitter, チャットルームを使って, 質問をしたりプロジェクトのさまざまな側面について話し合ったりすることをお勧めします. 
+ +-チャット(Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] + ディスカッションリスト(Nabble):: {uri-discuss} + Twitter:: ハッシュタグ https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] またはメンション https://twitter.com/asciidoctor[@asciidoctor] + diff -Nru asciidoctor-1.5.5/debian/patches/series asciidoctor-2.0.10/debian/patches/series --- asciidoctor-1.5.5/debian/patches/series 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/series 2019-08-18 21:46:44.000000000 +0000 @@ -1,5 +1,4 @@ data_path.patch -lib_directory_in_tests.patch -skip-asciimath-test.patch -package-version.patch skip-unreadable-file.patch +privacy_breach.patch +syntax_hightlighter.patch diff -Nru asciidoctor-1.5.5/debian/patches/skip-asciimath-test.patch asciidoctor-2.0.10/debian/patches/skip-asciimath-test.patch --- asciidoctor-1.5.5/debian/patches/skip-asciimath-test.patch 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/skip-asciimath-test.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Description: Skip test that depends on unpackaged optional library -Author: Antonio Terceiro -Origin: vendor -Forwarded: not-needed -Last-Update: 2015-11-17 ---- -This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ ---- a/test/blocks_test.rb -+++ b/test/blocks_test.rb -@@ -1278,6 +1278,7 @@ sqrt(3x-1)+(1+x)^2 < y - end - - test 'should render asciimath block in textobject of equation in DocBook backend' do -+ skip 'depends on unpackaged dependency' - input = <<-'EOS' - [asciimath] - ++++ ---- a/test/substitutions_test.rb -+++ b/test/substitutions_test.rb -@@ -1374,6 +1374,7 @@ EOS - #end - - test 'should convert asciimath macro content to MathML when asciimath gem is available' do -+ skip 'depends on unpackaged dependency' - input = 'asciimath:[a < b]' - para = block_from_string input, :backend => :docbook - assert_equal 'a<b', para.content diff -Nru asciidoctor-1.5.5/debian/patches/skip-unreadable-file.patch asciidoctor-2.0.10/debian/patches/skip-unreadable-file.patch --- asciidoctor-1.5.5/debian/patches/skip-unreadable-file.patch 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/skip-unreadable-file.patch 2019-08-18 16:19:20.000000000 +0000 @@ -7,11 +7,11 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ --- a/test/reader_test.rb +++ b/test/reader_test.rb -@@ -618,6 +618,7 @@ trailing content +@@ -789,6 +789,7 @@ end test 'unreadable file referenced by include directive is replaced by warning' do + skip 'file is readable in Debian build' include_file = File.join DIRNAME, 'fixtures', 'chapter-a.adoc' FileUtils.chmod 0000, include_file - input = <<-EOS + input = <<~'EOS' diff -Nru asciidoctor-1.5.5/debian/patches/syntax_hightlighter.patch asciidoctor-2.0.10/debian/patches/syntax_hightlighter.patch --- asciidoctor-1.5.5/debian/patches/syntax_hightlighter.patch 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/patches/syntax_hightlighter.patch 2019-08-18 21:46:44.000000000 +0000 @@ -0,0 +1,20 @@ +Description: Fix an issue with the load of syntax_highlighter +Author: Joseph Herlant +Bug: https://github.com/asciidoctor/asciidoctor/issues/3394 +Bug-Debian: https://bugs.debian.org/935196 +Last-Update: 2019-08-20 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +--- a/lib/asciidoctor.rb ++++ b/lib/asciidoctor.rb +@@ -511,8 +511,8 @@ + end unless RUBY_ENGINE == 'opal' + + unless RUBY_ENGINE == 'opal' +- autoload :SyntaxHighlighter, 
%(#{LIB_DIR}/asciidoctor/syntax_highlighter) +- autoload :Timings, %(#{LIB_DIR}/asciidoctor/timings) ++ autoload :SyntaxHighlighter, %(#{__dir__}/asciidoctor/syntax_highlighter) ++ autoload :Timings, %(#{__dir__}/asciidoctor/timings) + end + end + diff -Nru asciidoctor-1.5.5/debian/README.Debian asciidoctor-2.0.10/debian/README.Debian --- asciidoctor-1.5.5/debian/README.Debian 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/README.Debian 2019-08-18 21:46:44.000000000 +0000 @@ -0,0 +1,8 @@ +Since version 2.0.0 of the package the tests have been removed from the gem +by upstream, so we switched to using the github source for fetching the tarball. +But as gem2deb still fetches the gem during the build, the tests are not +injected in the package. As they don't bring anything really to the package +for the end-user, we're fine with that. We just use the tests that are in the +source package in autopkgtest. + + -- Joseph Herlant Mon, 19 Aug 2019 10:36:15 -0800 diff -Nru asciidoctor-1.5.5/debian/ruby-tests.rake asciidoctor-2.0.10/debian/ruby-tests.rake --- asciidoctor-1.5.5/debian/ruby-tests.rake 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/debian/ruby-tests.rake 2019-08-18 21:46:44.000000000 +0000 @@ -1,144 +1,2 @@ -require File.expand_path '../lib/asciidoctor/version', __FILE__ - -def prepare_test_env - # rather than hardcoding gc settings in test task, - # could use https://gist.github.com/benders/788695 - ENV['RUBY_GC_MALLOC_LIMIT'] = 128_000_000.to_s - ENV['RUBY_GC_OLDMALLOC_LIMIT'] = 128_000_000.to_s - if RUBY_VERSION >= '2.1' - ENV['RUBY_GC_HEAP_INIT_SLOTS'] = 800_000.to_s - ENV['RUBY_GC_HEAP_FREE_SLOTS'] = 800_000.to_s - ENV['RUBY_GC_HEAP_GROWTH_MAX_SLOTS'] = 250_000.to_s - ENV['RUBY_GC_HEAP_GROWTH_FACTOR'] = 1.25.to_s - else - ENV['RUBY_FREE_MIN'] = 800_000.to_s - end -end - -begin - require 'rake/testtask' - Rake::TestTask.new(:test) do |test| - prepare_test_env - puts %(LANG: #{ENV['LANG']}) if ENV.key? 
'TRAVIS_BUILD_ID' - test.libs << 'test' - test.pattern = 'test/**/*_test.rb' - test.verbose = true - test.warning = true - end - task :default => :test -rescue LoadError -end - -=begin -# Run tests with Encoding.default_external set to US-ASCII -begin - Rake::TestTask.new(:test_us_ascii) do |test| - prepare_test_env - puts "LANG: #{ENV['LANG']}" - test.libs << 'test' - test.pattern = 'test/**/*_test.rb' - test.ruby_opts << '-EUS-ASCII' if RUBY_VERSION >= '1.9' - test.verbose = true - test.warning = true - end -rescue LoadError -end -=end - -begin - require 'cucumber/rake/task' - Cucumber::Rake::Task.new(:features) do |t| - end -rescue LoadError -end - -def ci_setup_tasks - tasks = [] - begin - require 'ci/reporter/rake/minitest' - tasks << 'ci:setup:minitest' - # FIXME reporter for Cucumber tests not activating - #require 'ci/reporter/rake/cucumber' - #tasks << 'ci:setup:cucumber' - rescue LoadError - end if ENV['SHIPPABLE'] && RUBY_VERSION >= '1.9.3' - tasks -end - -desc 'Activates coverage and JUnit-style XML reports for tests' -task :coverage => ci_setup_tasks do - # exclude coverage run for Ruby 1.8.7 or (disabled) if running on Travis CI - ENV['COVERAGE'] = 'true' if RUBY_VERSION >= '1.9.3' # && (ENV['SHIPPABLE'] || !ENV['TRAVIS_BUILD_ID']) - ENV['CI_REPORTS'] = 'shippable/testresults' - ENV['COVERAGE_REPORTS'] = 'shippable/codecoverage' -end - -namespace :test do - desc 'Run unit and feature tests' - task :all => [:test,:features] -end - -=begin -begin - require 'rdoc/task' - RDoc::Task.new do |rdoc| - rdoc.rdoc_dir = 'rdoc' - rdoc.title = "Asciidoctor #{Asciidoctor::VERSION}" - rdoc.markup = 'tomdoc' if rdoc.respond_to?(:markup) - rdoc.rdoc_files.include('LICENSE.adoc', 'lib/**/*.rb') - end -rescue LoadError -end -=end - -begin - require 'yard' - require 'yard-tomdoc' - require './lib/asciidoctor' - require './lib/asciidoctor/extensions' - - # Prevent YARD from breaking command statements in literal paragraphs - class CommandBlockPostprocessor < Asciidoctor::Extensions::Postprocessor - def process document, output - output.gsub(/
<pre>\$ (.+?)<\/pre>/m, '<pre class="command">$ \1</pre>
') - end - end - Asciidoctor::Extensions.register do - postprocessor CommandBlockPostprocessor - end - - # register .adoc extension for AsciiDoc markup helper - YARD::Templates::Helpers::MarkupHelper::MARKUP_EXTENSIONS[:asciidoc] = %w(adoc) - YARD::Rake::YardocTask.new do |yard| - yard.files = %w( - lib/**/*.rb - - - CHANGELOG.adoc - LICENSE.adoc - ) - # --no-highlight enabled to prevent verbatim blocks in AsciiDoc that begin with $ from being dropped - # need to patch htmlify method to not attempt to syntax highlight blocks (or fix what's wrong) - yard.options = (IO.readlines '.yardopts').map {|l| l.chomp.delete('"').split ' ', 2 }.flatten - end -rescue LoadError -end - -begin - require 'bundler/gem_tasks' - - # Enhance the release task to create an explicit commit for the release - #Rake::Task[:release].enhance [:commit_release] - - # NOTE you don't need to push after updating version and committing locally - # WARNING no longer works; it's now necessary to get master in a state ready for tagging - task :commit_release do - Bundler::GemHelper.new.send(:guard_clean) - sh "git commit --allow-empty -a -m 'Release #{Asciidoctor::VERSION}'" - end -rescue LoadError -end - -desc 'Open an irb session preloaded with this library' -task :console do - sh 'bundle console', :verbose => false -end +load 'Rakefile' +task :default => 'test:all' diff -Nru asciidoctor-1.5.5/debian/rules asciidoctor-2.0.10/debian/rules --- asciidoctor-1.5.5/debian/rules 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/rules 2019-08-18 21:46:44.000000000 +0000 @@ -1,6 +1,8 @@ #!/usr/bin/make -f export GEM2DEB_TEST_RUNNER = --check-dependencies +export ASCIIDOCTOR_MANPAGE_PATH = $(CURDIR)/man/asciidoctor.1 +# export ASCIIDOCTOR_LIB_DIR = $(CURDIR)/debian/ruby-asciidoctor/usr/lib/ruby/vendor_ruby %: dh $@ --buildsystem=ruby --with ruby @@ -13,18 +15,24 @@ dh_auto_build # build documentation rdoc --main=README.adoc lib + rm -f doc/js/*.js.gz override_dh_auto_install: - dh_install + # needed since we split the packages + install -d debian/ruby-asciidoctor/usr/share/ruby-asciidoctor + cp -r data/* debian/ruby-asciidoctor/usr/share/ruby-asciidoctor dh_auto_install # remove asciidoctor-safe - rm -f debian/asciidoctor/usr/bin/asciidoctor-safe + rm -f debian/ruby-asciidoctor/usr/bin/asciidoctor-safe # remove bundled jquery.js rm -f debian/asciidoctor-doc/usr/share/doc/asciidoctor-doc/html/js/jquery.js + mkdir -p debian/asciidoctor/usr/bin + mv debian/ruby-asciidoctor/usr/bin/* debian/asciidoctor/usr/bin/ + rmdir debian/ruby-asciidoctor/usr/bin override_dh_installchangelogs: dh_installchangelogs -O--buildsystem=ruby CHANGELOG.adoc override_dh_installdocs: - bin/asciidoctor README*.adoc + bin/asciidoctor -a webfonts! -a icons! -a image-uri-screenshot! 
README*.adoc dh_installdocs diff -Nru asciidoctor-1.5.5/debian/salsa-ci.yml asciidoctor-2.0.10/debian/salsa-ci.yml --- asciidoctor-1.5.5/debian/salsa-ci.yml 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/salsa-ci.yml 2019-08-18 16:19:20.000000000 +0000 @@ -0,0 +1,4 @@ +--- +include: + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml diff -Nru asciidoctor-1.5.5/debian/tests/control asciidoctor-2.0.10/debian/tests/control --- asciidoctor-1.5.5/debian/tests/control 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/tests/control 2019-08-18 21:46:44.000000000 +0000 @@ -0,0 +1,7 @@ +Tests: upstream-tests +Depends: @, @builddeps@ +Restrictions: allow-stderr + +Tests: generate-man +Depends: @ +Restrictions: allow-stderr diff -Nru asciidoctor-1.5.5/debian/tests/generate-man asciidoctor-2.0.10/debian/tests/generate-man --- asciidoctor-1.5.5/debian/tests/generate-man 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/tests/generate-man 2019-08-18 21:46:44.000000000 +0000 @@ -0,0 +1,122 @@ +#!/bin/sh + +set -exu + +# This is a test that generates a man page and compares the output to what's expected. +cd ${AUTOPKGTEST_TMP} + +export SOURCE_DATE_EPOCH=1566164804 +cat > example.adoc << __EOF__ += example(1) +Joseph Herlant +v1.0.0 +:doctype: manpage +:manmanual: EXAMPLE +:mansource: EXAMPLE + +== Name + +example - an example of manpage + +== Synopsis + +*example* [_OPTION_]... _FILE_... + +== Options + +*-o, --out-file*=_OUT_FILE_:: + Write result to file _OUT_FILE_. + +== Exit status + +*0*:: + Success. + Foo! + +*1*:: + Failure. + Bar :( + +== Resources + +*Project web site:* http://example.org + +== Copying + +Copyright (C) 2019 {author}. + +Free use of this software is granted under the terms of the MIT License. +__EOF__ + +cat > example.ref << __EOF__ +'\" t +.\" Title: example +.\" Author: Joseph Herlant +.\" Date: 2019-08-18 +.\" Manual: EXAMPLE +.\" Source: EXAMPLE +.\" Language: English +.\" +.TH "EXAMPLE" "1" "2019-08-18" "EXAMPLE" "EXAMPLE" +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.ss \n[.ss] 0 +.nh +.ad l +.de URL +\fI\\\\\$2\fP <\\\\\$1>\\\\\$3 +.. +.als MTO URL +.if \n[.g] \{\\ +. mso www.tmac +. am URL +. ad l +. . +. am MTO +. ad l +. . +. LINKSTYLE blue R < > +.\} +.SH "NAME" +example \- an example of manpage +.SH "SYNOPSIS" +.sp +\fBexample\fP [\fIOPTION\fP]... \fIFILE\fP... +.SH "OPTIONS" +.sp +\fB\-o, \-\-out\-file\fP=\fIOUT_FILE\fP +.RS 4 +Write result to file \fIOUT_FILE\fP. +.RE +.SH "EXIT STATUS" +.sp +\fB0\fP +.RS 4 +Success. +Foo! +.RE +.sp +\fB1\fP +.RS 4 +Failure. +Bar :( +.RE +.SH "RESOURCES" +.sp +\fBProject web site:\fP \c +.URL "http://example.org" "" "" +.SH "COPYING" +.sp +Copyright \(co 2019 Joseph Herlant. +.br +Free use of this software is granted under the terms of the MIT License. 
+.SH "AUTHOR" +.sp +Joseph Herlant +__EOF__ + +asciidoctor -b manpage example.adoc +# Removing the generator as it contains the version so we would have to change that all the time +sed '/Generator/d' -i example.1 +# Remove the newline character at the end of the ref file +truncate -s -1 example.ref +diff example.ref example.1 diff -Nru asciidoctor-1.5.5/debian/tests/upstream-tests asciidoctor-2.0.10/debian/tests/upstream-tests --- asciidoctor-1.5.5/debian/tests/upstream-tests 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/debian/tests/upstream-tests 2019-08-18 21:46:44.000000000 +0000 @@ -0,0 +1,22 @@ +#!/bin/sh + +set -exu + +# Since upstream don't distribute test files anymore and gem2deb still pull things from rubygems.org, tests are not in +# the package anymore so we only keep the tests in the source package for now and copy them for autopkgtest +cp -rv test features $AUTOPKGTEST_TMP + +cat > ${AUTOPKGTEST_TMP}/Rakefile << __EOF__ +require 'gem2deb/rake/testtask' + +Gem2Deb::Rake::TestTask.new do |t| + t.libs = ['test'] + t.test_files = FileList['test/**/*_test.rb'] + FileList['test/**/test_*.rb'] +end +__EOF__ + +cd $AUTOPKGTEST_TMP +export ASCIIDOCTOR_MANPAGE_PATH='/usr/share/man/man1/asciidoctor.1.gz' +export ASCIIDOCTOR_LIB_DIR='/usr/lib/ruby/vendor_ruby' +export RUBYOPT=-w +rake test diff -Nru asciidoctor-1.5.5/debian/watch asciidoctor-2.0.10/debian/watch --- asciidoctor-1.5.5/debian/watch 2017-07-02 09:34:55.000000000 +0000 +++ asciidoctor-2.0.10/debian/watch 2019-08-18 16:19:20.000000000 +0000 @@ -1,2 +1,3 @@ -version=3 -http://pkg-ruby-extras.alioth.debian.org/cgi-bin/gemwatch/asciidoctor .*/asciidoctor-(.*).tar.gz +version=4 +opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/asciidoctor-$1\.tar\.gz/ \ + https://github.com/asciidoctor/asciidoctor/tags .*/v?(\d\S+)\.tar\.gz diff -Nru asciidoctor-1.5.5/features/step_definitions.rb asciidoctor-2.0.10/features/step_definitions.rb --- asciidoctor-1.5.5/features/step_definitions.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/features/step_definitions.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,14 +1,11 @@ -# encoding: UTF-8 -ASCIIDOCTOR_PROJECT_DIR = File.dirname File.dirname(__FILE__) -Dir.chdir ASCIIDOCTOR_PROJECT_DIR - -if RUBY_VERSION < '1.9' - require 'rubygems' -end +# frozen_string_literal: true +ASCIIDOCTOR_FEATURES_DIR = File.absolute_path __dir__ +ASCIIDOCTOR_LIB_DIR = ENV['ASCIIDOCTOR_LIB_DIR'] || File.join(ASCIIDOCTOR_FEATURES_DIR, '../lib') require 'simplecov' if ENV['COVERAGE'] == 'true' -require File.join(ASCIIDOCTOR_PROJECT_DIR, 'lib', 'asciidoctor') +require File.join ASCIIDOCTOR_LIB_DIR, 'asciidoctor' +Dir.chdir Asciidoctor::ROOT_DIR require 'rspec/expectations' require 'tilt' @@ -20,34 +17,27 @@ When /it is converted to html/ do @output = Asciidoctor.convert @source - #File.open('/tmp/test.adoc', 'w') {|f| f.write @source } - #@output = %x{asciidoc -f compat/asciidoc.conf -o - -s /tmp/test.adoc | XMLLINT_INDENT='' xmllint --format - | tail -n +2}.rstrip - ##@output = %x{asciidoc -f compat/asciidoc.conf -o - -s /tmp/test.adoc} end When /it is converted to docbook/ do - @output = Asciidoctor.convert @source, :backend => :docbook + @output = Asciidoctor.convert @source, backend: :docbook end -Then /the result should match the (HTML|XML) source/ do |format, expect| - @output.should == expect +Then /the result should (match|contain) the (HTML|XML) source/ do |matcher, format, expected| + match_expectation = matcher == 'match' ? 
(eq expected) : (include expected) + (expect @output).to match_expectation end -Then /the result should match the (HTML|XML) structure/ do |format, expect| - case format - when 'HTML' - options = {:format => :html5} - when 'XML' - options = {:format => :xhtml} - else - options = {} +Then /the result should (match|contain) the (HTML|XML) structure/ do |matcher, format, expected| + result = @output + if format == 'HTML' + options = { format: :html, disable_escape: true, sort_attrs: false } + else # format == 'XML' + options = { format: :xhtml, disable_escape: true, sort_attrs: false } + result = result.gsub '"/>', '" />' if result.include? '"/>' end - slim_friendly_output = @output.lines.entries.map {|line| - if line.start_with? '<' - line - else - %(|#{line}) - end - }.join - Slim::Template.new(options) { slim_friendly_output }.render.should == Slim::Template.new(options) { expect }.render + result = Slim::Template.new(options) { result.each_line.map {|l| (l.start_with? '<') ? l : %(|#{l}) }.join }.render + expected = Slim::Template.new(options) { expected }.render + match_expectation = matcher == 'match' ? (eq expected) : (include expected) + (expect result).to match_expectation end diff -Nru asciidoctor-1.5.5/features/xref.feature asciidoctor-2.0.10/features/xref.feature --- asciidoctor-1.5.5/features/xref.feature 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/features/xref.feature 2019-08-18 16:11:54.000000000 +0000 @@ -1,9 +1,708 @@ # language: en Feature: Cross References - In order to create links to other sections + In order to create cross references between sections and blocks in the current or neighboring document As a writer - I want to be able to use a cross reference macro + I want to be able to use the cross reference macro to compose these references + Scenario: Create a cross reference to a block that has explicit reftext + Given the AsciiDoc source + """ + :xrefstyle: full + + See <> to learn how it works. + + .Parameterized Type + [[param-type-t,that "" thing]] + **** + This sidebar describes what that thing is all about. + **** + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#param-type-t' that "<T>" thing + |to learn how it works. + """ + When it is converted to docbook + Then the result should match the XML structure + """ + simpara + |See + xref<> linkend='param-type-t'/ + |to learn how it works. + sidebar xml:id='param-type-t' xreflabel='that "<T>" thing' + title Parameterized Type <T> + simpara This sidebar describes what that <T> thing is all about. + """ + + Scenario: Create a cross reference to a block that has explicit reftext with formatting + Given the AsciiDoc source + """ + :xrefstyle: full + + There are cats, then there are the <>. + + [[big-cats,*big* cats]] + == Big Cats + + So ferocious. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |There are cats, then there are the + a< href='#big-cats' big cats + |. + """ + When it is converted to docbook + Then the result should match the XML structure + """ + simpara + |There are cats, then there are the + xref< linkend='big-cats'/ + |. + section xml:id='big-cats' xreflabel='big cats' + title Big Cats + simpara So ferocious. + """ + + Scenario: Create a full cross reference to a numbered section + Given the AsciiDoc source + """ + :sectnums: + :xrefstyle: full + + See <> to find a complete list of features. 
+ + == About + + [#sect-features] + === Features + + All the features are listed in this section. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#sect-features' Section 1.1, “Features” + |to find a complete list of features. + """ + + Scenario: Create a short cross reference to a numbered section + Given the AsciiDoc source + """ + :sectnums: + :xrefstyle: short + + See <> to find a complete list of features. + + [#sect-features] + == Features + + All the features are listed in this section. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#sect-features' Section 1 + |to find a complete list of features. + """ + + Scenario: Create a basic cross reference to an unnumbered section + Given the AsciiDoc source + """ + :xrefstyle: full + + See <> to find a complete list of features. + + [#sect-features] + == Features + + All the features are listed in this section. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#sect-features' Features + |to find a complete list of features. + """ + + Scenario: Create a basic cross reference to a numbered section when the section reference signifier is disabled + Given the AsciiDoc source + """ + :sectnums: + :xrefstyle: full + :!section-refsig: + + See <> to find a complete list of features. + + [#sect-features] + == Features + + All the features are listed in this section. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#sect-features' 1, “Features” + |to find a complete list of features. + """ + + Scenario: Create a full cross reference to a numbered chapter + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :xrefstyle: full + + See <> to find a complete list of features. + + [#chap-features] + == Features + + All the features are listed in this chapter. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#chap-features' Chapter 1, Features + |to find a complete list of features. + """ + + Scenario: Create a short cross reference to a numbered chapter + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :xrefstyle: short + + See <> to find a complete list of features. + + [#chap-features] + == Features + + All the features are listed in this chapter. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#chap-features' Chapter 1 + |to find a complete list of features. + """ + + Scenario: Create a basic cross reference to a numbered chapter + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :xrefstyle: basic + + See <> to find a complete list of features. + + [#chap-features] + == Features + + All the features are listed in this chapter. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#chap-features' Features + |to find a complete list of features. + """ + + Scenario: Create a basic cross reference to an unnumbered chapter + Given the AsciiDoc source + """ + :doctype: book + :xrefstyle: full + + See <> to find a complete list of features. + + [#chap-features] + == Features + + All the features are listed in this chapter. 
+ """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#chap-features' Features + |to find a complete list of features. + """ + + Scenario: Create a cross reference to a chapter using a custom chapter reference signifier + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :xrefstyle: full + :chapter-refsig: Ch + + See <> to find a complete list of features. + + [#chap-features] + == Features + + All the features are listed in this chapter. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#chap-features' Ch 1, Features + |to find a complete list of features. + """ + + Scenario: Create a full cross reference to a numbered part + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :partnums: + :xrefstyle: full + + [preface] + = Preface + + See <> for an introduction to the language. + + [#p1] + = Language + + == Syntax + + This chapter covers the syntax. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#p1' Part I, “Language” + |for an introduction to the language. + """ + + Scenario: Create a short cross reference to a numbered part + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :partnums: + :xrefstyle: short + + [preface] + = Preface + + See <> for an introduction to the language. + + [#p1] + = Language + + == Syntax + + This chapter covers the syntax. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#p1' Part I + |for an introduction to the language. + """ + + Scenario: Create a basic cross reference to a numbered part + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :partnums: + :xrefstyle: basic + + [preface] + = Preface + + See <> for an introduction to the language. + + [#p1] + = Language + + == Syntax + + This chapter covers the syntax. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#p1' Language + |for an introduction to the language. + """ + + Scenario: Create a basic cross reference to an unnumbered part + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :xrefstyle: full + + [preface] + = Preface + + See <> for an introduction to the language. + + [#p1] + = Language + + == Syntax + + This chapter covers the syntax. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#p1' Language + |for an introduction to the language. + """ + + @wip + Scenario: Create a cross reference to a part using a custom part reference signifier + Given the AsciiDoc source + """ + :doctype: book + :sectnums: + :partnums: + :xrefstyle: full + :part-refsig: P + + [preface] + = Preface + + See <> for an introduction to the language. + + [#p1] + = Language + + == Syntax + + This chapter covers the syntax. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#p1' P I, “Language” + |for an introduction to the language. + """ + + Scenario: Create a full cross reference to a numbered appendix + Given the AsciiDoc source + """ + :sectnums: + :xrefstyle: full + + See <> to find a complete list of features. + + [appendix#app-features] + == Features + + All the features are listed in this appendix. 
+ """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#app-features' Appendix A, Features + |to find a complete list of features. + """ + + Scenario: Create a short cross reference to a numbered appendix + Given the AsciiDoc source + """ + :sectnums: + :xrefstyle: short + + See <> to find a complete list of features. + + [appendix#app-features] + == Features + + All the features are listed in this appendix. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#app-features' Appendix A + |to find a complete list of features. + """ + + Scenario: Create a full cross reference to an appendix even when section numbering is disabled + Given the AsciiDoc source + """ + :xrefstyle: full + + See <> to find a complete list of features. + + [appendix#app-features] + == Features + + All the features are listed in this appendix. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#app-features' Appendix A, Features + |to find a complete list of features. + """ + + Scenario: Create a full cross reference to a numbered formal block + Given the AsciiDoc source + """ + :xrefstyle: full + + See <> to find a table of features. + + .Features + [#tbl-features%autowidth] + |=== + |Text formatting |Formats text for display. + |=== + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#tbl-features' Table 1, “Features” + |to find a table of features. + """ + + Scenario: Create a short cross reference to a numbered formal block + Given the AsciiDoc source + """ + :xrefstyle: short + + See <> to find a table of features. + + .Features + [#tbl-features%autowidth] + |=== + |Text formatting |Formats text for display. + |=== + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#tbl-features' Table 1 + |to find a table of features. + """ + + Scenario: Create a basic cross reference to a numbered formal block when the caption prefix is disabled + Given the AsciiDoc source + """ + :xrefstyle: full + :!table-caption: + + See <> to find a table of features. + + .Features + [#tbl-features%autowidth] + |=== + |Text formatting |Formats text for display. + |=== + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#tbl-features' Features + |to find a table of features. + """ + + Scenario: Create a cross reference to a numbered formal block with a custom caption prefix + Given the AsciiDoc source + """ + :xrefstyle: full + :table-caption: Tbl + + See <> to find a table of features. + + .Features + [#tbl-features%autowidth] + |=== + |Text formatting |Formats text for display. + |=== + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#tbl-features' Tbl 1, “Features” + |to find a table of features. + """ + + Scenario: Create a full cross reference to a formal image block + Given the AsciiDoc source + """ + :xrefstyle: full + + Behold, <>! + + .The ferocious Ghostscript tiger + [#tiger] + image::tiger.svg[Ghostscript tiger] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .paragraph: p + |Behold, + a< href='#tiger' Figure 1, “The ferocious Ghostscript tiger” + |! + #tiger.imageblock + .content: img src='tiger.svg' alt='Ghostscript tiger' + .title Figure 1. 
The ferocious Ghostscript tiger + """ + + Scenario: Create a short cross reference to a formal image block + Given the AsciiDoc source + """ + :xrefstyle: short + + Behold, <>! + + .The ferocious Ghostscript tiger + [#tiger] + image::tiger.svg[Ghostscript tiger] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .paragraph: p + |Behold, + a< href='#tiger' Figure 1 + |! + #tiger.imageblock + .content: img src='tiger.svg' alt='Ghostscript tiger' + .title Figure 1. The ferocious Ghostscript tiger + """ + + Scenario: Create a full cross reference to a block with an explicit caption + Given the AsciiDoc source + """ + :xrefstyle: full + + See <> and <>. + + .Managing Orders + [#diagram-1,caption="Diagram {counter:diag-number}. "] + image::managing-orders.png[Managing Orders] + + .Managing Inventory + [#diagram-2,caption="Diagram {counter:diag-number}. "] + image::managing-inventory.png[Managing Inventory] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .paragraph: p + |See + a<> href='#diagram-1' Diagram 1, “Managing Orders” + |and + a< href='#diagram-2' Diagram 2, “Managing Inventory” + |. + #diagram-1.imageblock + .content: img src='managing-orders.png' alt='Managing Orders' + .title Diagram 1. Managing Orders + #diagram-2.imageblock + .content: img src='managing-inventory.png' alt='Managing Inventory' + .title Diagram 2. Managing Inventory + """ + + Scenario: Create a short cross reference to a block with an explicit caption + Given the AsciiDoc source + """ + :xrefstyle: short + + See <> and <>. + + .Managing Orders + [#diagram-1,caption="Diagram {counter:diag-number}. "] + image::managing-orders.png[Managing Orders] + + .Managing Inventory + [#diagram-2,caption="Diagram {counter:diag-number}. "] + image::managing-inventory.png[Managing Inventory] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .paragraph: p + |See + a<> href='#diagram-1' Diagram 1 + |and + a< href='#diagram-2' Diagram 2 + |. + #diagram-1.imageblock + .content: img src='managing-orders.png' alt='Managing Orders' + .title Diagram 1. Managing Orders + #diagram-2.imageblock + .content: img src='managing-inventory.png' alt='Managing Inventory' + .title Diagram 2. Managing Inventory + """ + + Scenario: Create a basic cross reference to an unnumbered formal block + Given the AsciiDoc source + """ + :xrefstyle: full + + See <> to find the data used in this report. + + .Data + [#data] + .... + a + b + c + .... + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |See + a<> href='#data' Data + |to find the data used in this report. + """ + + Scenario: Use title as cross reference text to refer to a formal admonition block + Given the AsciiDoc source + """ + :xrefstyle: full + + Recall in <>, we told you how to speed up this process. + + .Essential tip #1 + [#essential-tip-1] + TIP: You can speed up this process by pressing the turbo button. + """ + When it is converted to html + Then the result should contain the HTML structure + """ + |Recall in + a< href='#essential-tip-1' Essential tip #1 + |, we told you how to speed up this process. + """ Scenario: Create a cross reference from an AsciiDoc cell to a section Given the AsciiDoc source @@ -11,39 +710,38 @@ |=== a|See <<_install>> |=== - + == Install - + Instructions go here. 
""" When it is converted to html Then the result should match the HTML structure """ - table.tableblock.frame-all.grid-all.spread + table.tableblock.frame-all.grid-all.stretch colgroup col style='width: 100%;' tbody tr td.tableblock.halign-left.valign-top - div + div.content .paragraph: p - 'See - a href='#_install' Install + |See + a< href='#_install' Install .sect1 h2#_install Install .sectionbody .paragraph: p Instructions go here. """ - - Scenario: Create a cross reference using the target section title + Scenario: Create a cross reference using the title of the target section Given the AsciiDoc source """ == Section One content - == Section Two + == Section Two, continued from <
<Section One>> refer to <<Section One>
> """ @@ -54,14 +752,15 @@ h2#_section_one Section One .sectionbody: .paragraph: p content .sect1 - h2#_section_two Section Two + h2#_section_two_continued_from_section_one + |Section Two, continued from + a< href='#_section_one' Section One .sectionbody: .paragraph: p - 'refer to - a href='#_section_one' Section One + |refer to + a< href='#_section_one' Section One """ - - Scenario: Create a cross reference using the target reftext + Scenario: Create a cross reference using the reftext of the target section Given the AsciiDoc source """ [reftext="the first section"] @@ -82,12 +781,23 @@ .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p - 'refer to - a href='#_section_one' the first section + |refer to + a< href='#_section_one' the first section + """ + When it is converted to docbook + Then the result should match the XML structure + """ + section xml:id='_section_one' xreflabel='the first section' + title Section One + simpara content + section xml:id='_section_two' + title Section Two + simpara + |refer to + xref< linkend='_section_one'/ """ - - Scenario: Create a cross reference using the formatted target title + Scenario: Create a cross reference using the formatted title of the target section Given the AsciiDoc source """ == Section *One* @@ -102,15 +812,230 @@ Then the result should match the HTML structure """ .sect1 - h2#_section_strong_one_strong - 'Section - strong One + h2#_section_one + |Section One + .sectionbody: .paragraph: p content + .sect1 + h2#_section_two Section Two + .sectionbody: .paragraph: p + |refer to + a< href='#_section_one' Section One + """ + + Scenario: Does not process a natural cross reference in compat mode + Given the AsciiDoc source + """ + :compat-mode: + + == Section One + + content + + == Section Two + + refer to <
> + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_section_one + |Section One + .sectionbody: .paragraph: p content + .sect1 + h2#_section_two Section Two + .sectionbody: .paragraph: p + |refer to + a< href='#Section One' [Section One] + """ + + Scenario: Parses text of xref macro as attributes if attribute signature found + Given the AsciiDoc source + """ + == Section One + + content + + == Section Two + + refer to xref:_section_one[role=next] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_section_one + |Section One + .sectionbody: .paragraph: p content + .sect1 + h2#_section_two Section Two + .sectionbody: .paragraph: p + |refer to + a< href='#_section_one' class='next' Section One + """ + + Scenario: Does not parse text of xref macro as attribute if attribute signature not found + Given the AsciiDoc source + """ + == Section One + + content + + == Section Two + + refer to xref:_section_one[One, Section One] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_section_one + |Section One + .sectionbody: .paragraph: p content + .sect1 + h2#_section_two Section Two + .sectionbody: .paragraph: p + |refer to + a< href='#_section_one' One, Section One + """ + + Scenario: Uses whole text of xref macro as link text if attribute signature found and text is enclosed in double quotes + Given the AsciiDoc source + """ + == Section One + + content + + == Section Two + + refer to xref:_section_one["Section One == Starting Point"] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_section_one + |Section One + .sectionbody: .paragraph: p content + .sect1 + h2#_section_two Section Two + .sectionbody: .paragraph: p + |refer to + a< href='#_section_one' + |Section One == Starting Point + """ + + Scenario: Does not parse text of xref macro as text if enclosed in double quotes but attribute signature not found + Given the AsciiDoc source + """ + == Section One + + content + + == Section Two + + refer to xref:_section_one["The Premier Section"] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_section_one + |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p - 'refer to - a href='#_section_strong_one_strong' - 'Section - strong One + |refer to + a< href='#_section_one' "The Premier Section" + """ + + Scenario: Can escape double quotes in text of xref macro using backslashes when text is parsed as attributes + Given the AsciiDoc source + """ + == Section One + + content + + == Section Two + + refer to xref:_section_one["\"The Premier Section\"",role=spotlight] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_section_one + |Section One + .sectionbody: .paragraph: p content + .sect1 + h2#_section_two Section Two + .sectionbody: .paragraph: p + |refer to + a< href='#_section_one' class='spotlight' "The Premier Section" + """ + + Scenario: Override xrefstyle for a given part of the document + Given the AsciiDoc source + """ + :xrefstyle: full + :doctype: book + :sectnums: + + == Foo + + refer to <<#_bar>> + + == Bar + :xrefstyle: short + + refer to xref:#_foo[xrefstyle=short] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_foo 1. 
Foo + .sectionbody: .paragraph: p + |refer to + a< href='#_bar' Chapter 2, Bar + .sect1 + h2#_bar 2. Bar + .sectionbody: .paragraph: p + |refer to + a< href='#_foo' Chapter 1 + """ + + Scenario: Override xrefstyle for a specific reference by assigning the xrefstyle attribute on the xref macro + Given the AsciiDoc source + """ + :xrefstyle: full + :doctype: book + :sectnums: + + == Foo + + content + + == Bar + + refer to <<#_foo>> + + refer to xref:#_foo[xrefstyle=short] + """ + When it is converted to html + Then the result should match the HTML structure + """ + .sect1 + h2#_foo 1. Foo + .sectionbody: .paragraph: p content + .sect1 + h2#_bar 2. Bar + .sectionbody + .paragraph: p + |refer to + a< href='#_foo' Chapter 1, Foo + .paragraph: p + |refer to + a< href='#_foo' Chapter 1 """ diff -Nru asciidoctor-1.5.5/Gemfile asciidoctor-2.0.10/Gemfile --- asciidoctor-1.5.5/Gemfile 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/Gemfile 2019-08-18 16:11:54.000000000 +0000 @@ -3,8 +3,17 @@ # Look in asciidoctor.gemspec for runtime and development dependencies gemspec +group :development do + gem 'pygments.rb' if ENV['PYGMENTS'] +end + +group :doc do + gem 'yard' + gem 'yard-tomdoc' +end + # enable this group to use Guard for continuous testing -# after removing comments, run `bundle install` then `guard` +# after removing comments, run `bundle install` then `guard` #group :guardtest do # gem 'guard' # gem 'guard-test' @@ -13,11 +22,5 @@ #end group :ci do - gem 'simplecov', '~> 0.9.1' - if ENV['SHIPPABLE'] - gem 'simplecov-csv', '~> 0.1.3' - gem 'ci_reporter', '~> 2.0.0' - gem 'ci_reporter_minitest', '~> 1.0.0' - #gem 'ci_reporter_cucumber', '~> 1.0.0' - end + gem 'simplecov', '~> 0.16.0' end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/abstract_block.rb asciidoctor-2.0.10/lib/asciidoctor/abstract_block.rb --- asciidoctor-1.5.5/lib/asciidoctor/abstract_block.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/abstract_block.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,48 +1,51 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor class AbstractBlock < AbstractNode - # Public: The types of content that this block can accomodate - attr_accessor :content_model + # Public: Get the Array of {AbstractBlock} child blocks for this block. Only applies if content model is :compound. + attr_reader :blocks - # Public: Substitutions to be applied to content in this block - attr_reader :subs + # Public: Set the caption for this block. + attr_writer :caption - # Public: Get the Array of Asciidoctor::AbstractBlock sub-blocks for this block - attr_reader :blocks + # Public: Describes the type of content this block accepts and how it should be converted. Acceptable values are: + # * :compound - this block contains other blocks + # * :simple - this block holds a paragraph of prose that receives normal substitutions + # * :verbatim - this block holds verbatim text (displayed "as is") that receives verbatim substitutions + # * :raw - this block holds unprocessed content passed directly to the output with no sustitutions applied + # * :empty - this block has no content + attr_accessor :content_model - # Public: Set the Integer level of this Section or the Section level in which this Block resides + # Public: Set the Integer level of this {Section} or the level of the Section to which this {AbstractBlock} belongs. attr_accessor :level - # Public: Set the String block title. 
- attr_writer :title + # Public: Get/Set the String numeral of this block (if section, relative to parent, otherwise absolute). + # Only assigned to section if automatic section numbering is enabled. + # Only assigned to formal block (block with title) if corresponding caption attribute is present. + attr_accessor :numeral + + # Public: Gets/Sets the location in the AsciiDoc source where this block begins. + attr_accessor :source_location # Public: Get/Set the String style (block type qualifier) for this block. attr_accessor :style - # Public: Get/Set the caption for this block - attr_accessor :caption - - # Public: Gets/Sets the location in the AsciiDoc source where this block begins - attr_accessor :source_location + # Public: Substitutions to be applied to content in this block. + attr_reader :subs def initialize parent, context, opts = {} super @content_model = :compound - @subs = [] - @default_subs = nil @blocks = [] - @id = nil - @title = nil - @caption = nil - @style = nil - @level = if context == :document - 0 - elsif parent && context != :section - parent.level + @subs = [] + @id = @title = @caption = @numeral = @style = @default_subs = @source_location = nil + if context == :document || context == :section + @level = @next_section_index = 0 + @next_section_ordinal = 1 + elsif AbstractBlock === parent + @level = parent.level + else + @level = nil end - @next_section_index = 0 - @next_section_number = 1 - @source_location = nil end def block? @@ -53,13 +56,14 @@ false end - # Public: Update the context of this block. - # - # This method changes the context of this block. It also - # updates the node name accordingly. - def context=(context) - @context = context - @node_name = context.to_s + # Public: Get the source file where this block started + def file + @source_location && @source_location.file + end + + # Public: Get the source line number where this block started + def lineno + @source_location && @source_location.lineno end # Public: Get the converted String content for this Block. If the block @@ -71,85 +75,24 @@ converter.convert self end - # Alias render to convert to maintain backwards compatibility - alias :render :convert + # Deprecated: Use {AbstractBlock#convert} instead. + alias render convert # Public: Get the converted result of the child blocks by converting the # children appropriate to content model that this block supports. def content - @blocks.map {|b| b.convert } * EOL - end - - # Public: Get the source file where this block started - def file - @source_location ? @source_location.file : nil - end - - # Public: Get the source line number where this block started - def lineno - @source_location ? @source_location.lineno : nil - end - - # Public: A convenience method that checks whether the specified - # substitution is enabled for this block. - # - # name - The Symbol substitution name - # - # Returns A Boolean indicating whether the specified substitution is - # enabled for this block - def sub? name - @subs.include? name - end - - # Public: A convenience method that indicates whether the title instance - # variable is blank (nil or empty) - def title? - !@title.nil_or_empty? 
- end - - # Public: Get the String title of this Block with title substitions applied - # - # The following substitutions are applied to block and section titles: - # - # :specialcharacters, :quotes, :replacements, :macros, :attributes and :post_replacements - # - # Examples - # - # block.title = "Foo 3^ # {two-colons} Bar(1)" - # block.title - # => "Foo 3^ # :: Bar(1)" - # - # Returns the String title of this Block - def title - # prevent substitutions from being applied multiple times - if defined?(@subbed_title) - @subbed_title - elsif @title - @subbed_title = apply_title_subs(@title) - else - @title - end + @blocks.map {|b| b.convert }.join LF end - # Public: Convenience method that returns the interpreted title of the Block - # with the caption prepended. + # Public: Update the context of this block. # - # Concatenates the value of this Block's caption instance variable and the - # return value of this Block's title method. No space is added between the - # two values. If the Block does not have a caption, the interpreted title is - # returned. + # This method changes the context of this block. It also updates the node name accordingly. # - # Returns the String title prefixed with the caption, or just the title if no - # caption is set - def captioned_title - %(#{@caption}#{title}) - end - - # Public: Determine whether this Block contains block content + # context - the context Symbol context to assign to this block # - # Returns A Boolean indicating whether this Block has block content - def blocks? - !@blocks.empty? + # Returns the new context Symbol assigned to this block + def context= context + @node_name = (@context = context).to_s end # Public: Append a content block to this block's list of blocks. @@ -158,10 +101,10 @@ # # Examples # - # block = Block.new(parent, :preamble, :content_model => :compound) + # block = Block.new(parent, :preamble, content_model: :compound) # - # block << Block.new(block, :paragraph, :source => 'p1') - # block << Block.new(block, :paragraph, :source => 'p2') + # block << Block.new(block, :paragraph, source: 'p1') + # block << Block.new(block, :paragraph, source: 'p2') # block.blocks? # # => true # block.blocks.size @@ -169,39 +112,19 @@ # # Returns The parent Block def << block - # parent assignment pending refactor - #block.parent = self + block.parent = self unless block.parent == self @blocks << block self end # NOTE append alias required for adapting to a Java API - alias :append :<< + alias append << - # Public: Get the Array of child Section objects - # - # Only applies to Document and Section instances - # - # Examples - # - # doc << (sect1 = Section.new doc, 1, false) - # sect1.title = 'Section 1' - # para1 = Block.new sect1, :paragraph, :source => 'Paragraph 1' - # para2 = Block.new sect1, :paragraph, :source => 'Paragraph 2' - # sect1 << para1 << para2 - # sect1 << (sect1_1 = Section.new sect1, 2, false) - # sect1_1.title = 'Section 1.1' - # sect1_1 << (Block.new sect1_1, :paragraph, :source => 'Paragraph 3') - # sect1.blocks? - # # => true - # sect1.blocks.size - # # => 3 - # sect1.sections.size - # # => 1 + # Public: Determine whether this Block contains block content # - # Returns an [Array] of Section objects - def sections - @blocks.select {|block| block.context == :section } + # Returns A Boolean indicating whether this Block has block content + def blocks? + @blocks.empty? ? false : true end # Public: Check whether this block has any child Section objects. 
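The comments in the hunk above document the reworked block-tree API: << (aliased as append) adds a child block, blocks? reports whether any children exist, content joins the converted children with LF, and sections filters the children down to Section nodes. A minimal sketch of how those pieces fit together, assuming the Asciidoctor 2.0 API as described in these comments and the standard Asciidoctor.load entry point; the sample AsciiDoc string is invented for illustration:

require 'asciidoctor'

doc = Asciidoctor.load <<~'ADOC'
  == Section One

  content
ADOC

doc.blocks?                            # => true  (the document holds one child Section)
doc.sections.map {|sect| sect.title }  # => ["Section One"]
doc.blocks[0].blocks[0].content_model  # => :simple (a paragraph of prose)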
@@ -213,41 +136,19 @@ @next_section_index > 0 end -# stage the Enumerable mixin until we're sure we've got it right -=begin - include ::Enumerable - - # Public: Yield the block on this block node and all its descendant - # block node children to satisfy the Enumerable contract. - # - # Returns nothing - def each &block - # yucky, dlist is a special case - if @context == :dlist - @blocks.flatten.each &block - else - #yield self.header if @context == :document && header? - @blocks.each &block - end - end - - #-- - # TODO is there a way to make this lazy? - def each_recursive &block - block = lambda {|node| node } unless block_given? - results = [] - self.each do |node| - results << block.call(node) - results.concat(node.each_recursive(&block)) if ::Enumerable === node - end - block_given? ? results : results.to_enum + # Deprecated: Legacy property to get the String or Integer numeral of this section. + def number + (Integer @numeral) rescue @numeral end -=end - # Public: Query for all descendant block-level nodes in the document tree - # that match the specified selector (context, style, id, and/or role). If a - # Ruby block is given, it's used as an additional filter. If no selector or - # Ruby block is supplied, all block-level nodes in the tree are returned. + # Public: Walk the document tree and find all block-level nodes that match the specified selector (context, style, id, + # role, and/or custom filter). + # + # If a Ruby block is given, it's applied as a supplemental filter. If the filter returns true (which implies :accept), + # the node is accepted and node traversal continues. If the filter returns false (which implies :skip), the node is + # skipped, but its children are still visited. If the filter returns :reject, the node and all its descendants are + # rejected. If the filter returns :prune, the node is accepted, but its descendants are rejected. If no selector + # or filter block is supplied, all block-level nodes in the tree are returned. # # Examples # @@ -265,94 +166,91 @@ #-- # TODO support jQuery-style selector (e.g., image.thumb) def find_by selector = {}, &block - result = [] + find_by_internal selector, (result = []), &block + rescue ::StopIteration + result + end - if ((any_context = !(context_selector = selector[:context])) || context_selector == @context) && - (!(style_selector = selector[:style]) || style_selector == @style) && - (!(role_selector = selector[:role]) || (has_role? role_selector)) && - (!(id_selector = selector[:id]) || id_selector == @id) - if id_selector - if block_given? - return (yield self) ? [self] : result - else - return [self] - end - elsif block_given? - result << self if (yield self) + alias query find_by + + # Move to the next adjacent block in document order. If the current block is the last + # item in a list, this method will return the following sibling of the list block. + def next_adjacent_block + unless @context == :document + if (p = @parent).context == :dlist && @context == :list_item + (sib = p.items[(p.items.find_index {|terms, desc| (terms.include? self) || desc == self }) + 1]) ? sib : p.next_adjacent_block else - result << self + (sib = p.blocks[(p.blocks.find_index self) + 1]) ? sib : p.next_adjacent_block end end + end - # process document header as a section if present - if @context == :document && (any_context || context_selector == :section) && header? 
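To make the filter verdicts described here concrete, a small sketch follows; the sample document and the choice to prune at level 1 are illustrative assumptions, not part of the diff.

  require 'asciidoctor'

  doc = Asciidoctor.load "== First\n\n=== Nested\n\n== Second"
  # :prune accepts a level-1 section but skips its descendants;
  # false passes over other nodes without rejecting their children
  doc.find_by(context: :section) {|sect| sect.level == 1 ? :prune : false }.map {|sect| sect.title }
  # => ["First", "Second"]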
- result.concat(@header.find_by selector, &block) - end + # Public: Get the Array of child Section objects + # + # Only applies to Document and Section instances + # + # Examples + # + # doc << (sect1 = Section.new doc, 1) + # sect1.title = 'Section 1' + # para1 = Block.new sect1, :paragraph, source: 'Paragraph 1' + # para2 = Block.new sect1, :paragraph, source: 'Paragraph 2' + # sect1 << para1 << para2 + # sect1 << (sect1_1 = Section.new sect1, 2) + # sect1_1.title = 'Section 1.1' + # sect1_1 << (Block.new sect1_1, :paragraph, source: 'Paragraph 3') + # sect1.blocks? + # # => true + # sect1.blocks.size + # # => 3 + # sect1.sections.size + # # => 1 + # + # Returns an [Array] of Section objects + def sections + @blocks.select {|block| block.context == :section } + end - unless context_selector == :document # optimization - # yuck, dlist is a special case - if @context == :dlist - if any_context || context_selector != :section # optimization - @blocks.flatten.each do |li| - # NOTE the list item of a dlist can be nil, so we have to check - result.concat(li.find_by selector, &block) if li - end - end - elsif - @blocks.each do |b| - next if (context_selector == :section && b.context != :section) # optimization - result.concat(b.find_by selector, &block) - end + # Public: Returns the converted alt text for this block image. + # + # Returns the [String] value of the alt attribute with XML special character + # and replacement substitutions applied. + def alt + if (text = @attributes['alt']) + if text == @attributes['default-alt'] + sub_specialchars text + else + text = sub_specialchars text + (ReplaceableTextRx.match? text) ? (sub_replacements text) : text end + else + '' end - result end - alias :query :find_by - # Public: Remove a substitution from this block + # Gets the caption for this block. # - # sub - The Symbol substitution name + # This method routes the deprecated use of the caption method on an + # admonition block to the textlabel attribute. # - # Returns nothing - def remove_sub sub - @subs.delete sub - nil + # Returns the [String] caption for this block (or the value of the textlabel + # attribute if this is an admonition block). + def caption + @context == :admonition ? @attributes['textlabel'] : @caption end - # Public: Generate a caption and assign it to this block if one - # is not already assigned. + # Public: Convenience method that returns the interpreted title of the Block + # with the caption prepended. # - # If the block has a title and a caption prefix is available - # for this block, then build a caption from this information, - # assign it a number and store it to the caption attribute on - # the block. - # - # If an explicit caption has been specified on this block, then - # do nothing. - # - # key - The prefix of the caption and counter attribute names. - # If not provided, the name of the context for this block - # is used. (default: nil). + # Concatenates the value of this Block's caption instance variable and the + # return value of this Block's title method. No space is added between the + # two values. If the Block does not have a caption, the interpreted title is + # returned. # - # Returns nothing - def assign_caption(caption = nil, key = nil) - return unless title? || !@caption - - if caption - @caption = caption - else - if (value = @document.attributes['caption']) - @caption = value - elsif title? 
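As a rough sketch of how an automatically assigned caption combines with the title, assume a document that sets the listing-caption attribute (it is unset by default); the snippet and the expected values are illustrative.

  require 'asciidoctor'

  doc = Asciidoctor.load ":listing-caption: Listing\n\n.Say hello\n----\nputs 'hello'\n----"
  listing = (doc.find_by context: :listing).first
  listing.caption          # => "Listing 1. "
  listing.captioned_title  # => "Listing 1. Say hello"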
-          key ||= @context.to_s
-          caption_key = "#{key}-caption"
-          if (caption_title = @document.attributes[caption_key])
-            caption_num = @document.counter_increment("#{key}-number", self)
-            @caption = "#{caption_title} #{caption_num}. "
-          end
-        end
-      end
-    nil
+  # Returns the converted String title prefixed with the caption, or just the
+  # converted String title if no caption is set
+  def captioned_title
+    %(#{@caption}#{title})
   end
 
   # Public: Retrieve the list marker keyword for the specified list type.
@@ -366,33 +264,154 @@
     ORDERED_LIST_KEYWORDS[list_type || @style]
   end
 
-  # Internal: Assign the next index (0-based) to this section
+  # Public: Get the String title of this Block with title substitutions applied
+  #
+  # The following substitutions are applied to block and section titles:
+  #
+  # :specialcharacters, :quotes, :replacements, :macros, :attributes and :post_replacements
+  #
+  # Examples
+  #
+  #   block.title = "Foo 3^ # {two-colons} Bar(1)"
+  #   block.title
+  #   => "Foo 3^ # :: Bar(1)"
+  #
+  # Returns the converted String title for this Block, or nil if the source title is falsy
+  def title
+    # prevent substitutions from being applied to title multiple times
+    @converted_title ||= @title && (apply_title_subs @title)
+  end
+
+  # Public: A convenience method that checks whether the title of this block is defined.
+  #
+  # Returns a [Boolean] indicating whether this block has a title.
+  def title?
+    @title ? true : false
+  end
+
+  # Public: Set the String block title.
+  #
+  # Returns the new String title assigned to this Block
+  def title= val
+    @converted_title = nil
+    @title = val
+  end
+
+  # Public: A convenience method that checks whether the specified
+  # substitution is enabled for this block.
+  #
+  # name - The Symbol substitution name
+  #
+  # Returns A Boolean indicating whether the specified substitution is
+  # enabled for this block
+  def sub? name
+    @subs.include? name
+  end
+
+  # Public: Remove a substitution from this block
   #
-  # Assign the next index of this section within the parent
-  # Block (in document order)
+  # sub - The Symbol substitution name
   #
   # Returns nothing
-  def assign_index(section)
-    section.index = @next_section_index
-    @next_section_index += 1
-
-    if section.sectname == 'appendix'
-      appendix_number = @document.counter 'appendix-number', 'A'
-      section.number = appendix_number if section.numbered
-      if (caption = @document.attr 'appendix-caption', '').empty?
-        section.caption = %(#{appendix_number}. )
-      else
-        section.caption = %(#{caption} #{appendix_number}: )
+  def remove_sub sub
+    @subs.delete sub
+    nil
+  end
+
+  # Public: Generate cross reference text (xreftext) that can be used to refer
+  # to this block.
+  #
+  # Use the explicit reftext for this block, if specified, retrieved from the
+  # {#reftext} method. Otherwise, if this is a section or captioned block (a
+  # block with both a title and caption), generate the xreftext according to
+  # the value of the xrefstyle argument (e.g., full, short). This logic may
+  # leverage the {Substitutors#sub_quotes} method to apply formatting to the
+  # text. If this is not a captioned block, return the title, if present, or
+  # nil otherwise.
+  #
+  # xrefstyle - An optional String that specifies the style to use to format
+  #             the xreftext ('full', 'short', or 'basic') (default: nil).
+  #
+  # Returns the generated [String] xreftext used to refer to this block or
+  # nothing if there isn't sufficient information to generate one.
+  def xreftext xrefstyle = nil
+    if (val = reftext) && !val.empty?
+      val
+    # NOTE xrefstyle only applies to blocks with a title and a caption or number
+    elsif xrefstyle && @title && @caption
+      case xrefstyle
+      when 'full'
+        quoted_title = sub_placeholder (sub_quotes @document.compat_mode ? %q(``%s'') : '"`%s`"'), title
+        if @numeral && (caption_attr_name = CAPTION_ATTR_NAMES[@context]) && (prefix = @document.attributes[caption_attr_name])
+          %(#{prefix} #{@numeral}, #{quoted_title})
+        else
+          %(#{@caption.chomp '. '}, #{quoted_title})
+        end
+      when 'short'
+        if @numeral && (caption_attr_name = CAPTION_ATTR_NAMES[@context]) && (prefix = @document.attributes[caption_attr_name])
+          %(#{prefix} #{@numeral})
+        else
+          @caption.chomp '. '
+        end
+      else # 'basic'
+        title
+      end
+    else
+      title
+    end
+  end
+
+  # Public: Generate and assign caption to block if not already assigned.
+  #
+  # If the block has a title and a caption prefix is available for this block,
+  # then build a caption from this information, assign it a number and store it
+  # to the caption attribute on the block.
+  #
+  # If a caption has already been assigned to this block, do nothing.
+  #
+  # The parts of a complete caption are: <prefix> <number>. <title>
+  # This partial caption represents the part that precedes the title.
+  #
+  # value - The String caption to assign to this block or nil to use document attribute.
+  # caption_context - The Symbol context to use when resolving caption-related attributes. If not provided, the name of
+  #                   the context for this block is used. Only certain contexts allow the caption to be looked up.
+  #                   (default: @context)
+  #
+  # Returns nothing.
+  def assign_caption value, caption_context = @context
+    unless @caption || !@title || (@caption = value || @document.attributes['caption'])
+      if (attr_name = CAPTION_ATTR_NAMES[caption_context]) && (prefix = @document.attributes[attr_name])
+        @caption = %(#{prefix} #{@numeral = @document.increment_and_store_counter %(#{caption_context}-number), self}. )
+        nil
       end
-      elsif section.numbered
-        # chapters in a book doctype should be sequential even when divided into parts
-        if (section.level == 1 || (section.level == 0 && section.special)) && @document.doctype == 'book'
-          section.number = @document.counter('chapter-number', 1)
+    end
+  end
+
+  # Internal: Assign the next index (0-based) and numeral (1-based) to the section.
+  # If the section is an appendix, the numeral is a letter (starting with A). This
+  # method also assigns the appendix caption.
+  #
+  # section - The section to which to assign the next index and numeral.
+  #
+  # Assign to the specified section the next index and, if the section is
+  # numbered, the numeral within this block (its parent).
+  #
+  # Returns nothing
  def assign_numeral section
+    @next_section_index = (section.index = @next_section_index) + 1
+    if (like = section.numbered)
+      if (sectname = section.sectname) == 'appendix'
+        section.numeral = @document.counter 'appendix-number', 'A'
+        section.caption = (caption = @document.attributes['appendix-caption']) ? %(#{caption} #{section.numeral}: ) : %(#{section.numeral}. )
+      # NOTE currently chapters in a book doctype are sequential even for multi-part books (see #979)
+      elsif sectname == 'chapter' || like == :chapter
+        section.numeral = (@document.counter 'chapter-number', 1).to_s
       else
-        section.number = @next_section_number
-        @next_section_number += 1
+        section.numeral = sectname == 'part' ?
(Helpers.int_to_roman @next_section_ordinal) : @next_section_ordinal.to_s + @next_section_ordinal += 1 end end + nil end # Internal: Reassign the section indexes @@ -403,17 +422,89 @@ # # IMPORTANT You must invoke this method on a node after removing # child sections or else the internal counters will be off. - # + # # Returns nothing def reindex_sections @next_section_index = 0 - @next_section_number = 0 - @blocks.each {|block| + @next_section_ordinal = 1 + @blocks.each do |block| if block.context == :section - assign_index(block) + assign_numeral block block.reindex_sections end - } + end + end + + protected + + # Internal: Performs the work for find_by, but does not handle the StopIteration exception. + def find_by_internal selector = {}, result = [], &block + if ((any_context = (context_selector = selector[:context]) ? nil : true) || context_selector == @context) && + (!(style_selector = selector[:style]) || style_selector == @style) && + (!(role_selector = selector[:role]) || (has_role? role_selector)) && + (!(id_selector = selector[:id]) || id_selector == @id) + if block_given? + if (verdict = yield self) + case verdict + when :prune + result << self + raise ::StopIteration if id_selector + return result + when :reject + raise ::StopIteration if id_selector + return result + when :stop + raise ::StopIteration + else + result << self + raise ::StopIteration if id_selector + end + elsif id_selector + raise ::StopIteration + end + else + result << self + raise ::StopIteration if id_selector + end + end + case @context + when :document + unless context_selector == :document + # process document header as a section, if present + if header? && (any_context || context_selector == :section) + @header.find_by_internal selector, result, &block + end + @blocks.each do |b| + next if (context_selector == :section && b.context != :section) # optimization + b.find_by_internal selector, result, &block + end + end + when :dlist + # dlist has different structure than other blocks + if any_context || context_selector != :section # optimization + # NOTE the list item of a dlist can be nil, so we have to check + @blocks.flatten.each {|b| b.find_by_internal selector, result, &block if b } + end + when :table + if selector[:traverse_documents] + rows.head.each {|r| r.each {|c| c.find_by_internal selector, result, &block } } + selector = selector.merge context: :document if context_selector == :inner_document + (rows.body + rows.foot).each do |r| + r.each do |c| + c.find_by_internal selector, result, &block + c.inner_document.find_by_internal selector, result, &block if c.style == :asciidoc + end + end + else + (rows.head + rows.body + rows.foot).each {|r| r.each {|c| c.find_by_internal selector, result, &block } } + end + else + @blocks.each do |b| + next if (context_selector == :section && b.context != :section) # optimization + b.find_by_internal selector, result, &block + end + end + result end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/abstract_node.rb asciidoctor-2.0.10/lib/asciidoctor/abstract_node.rb --- asciidoctor-1.5.5/lib/asciidoctor/abstract_node.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/abstract_node.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,59 +1,49 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: An abstract base class that provides state and methods for managing a -# node of AsciiDoc content. The state and methods on this class are comment to +# node of AsciiDoc content. 
The state and methods on this class are common to # all content segments in an AsciiDoc document. class AbstractNode + include Substitutors, Logging - include Substitutors + # Public: Get the Hash of attributes for this node + attr_reader :attributes - # Public: Get the element which is the parent of this node - attr_reader :parent + # Public: Get the Symbol context for this node + attr_reader :context # Public: Get the Asciidoctor::Document to which this node belongs attr_reader :document - # Public: Get the Symbol context for this node - attr_reader :context + # Public: Get/Set the String id of this node + attr_accessor :id # Public: Get the String name of this node attr_reader :node_name - # Public: Get/Set the id of this node - attr_accessor :id - - # Public: Get the Hash of attributes for this node - attr_reader :attributes + # Public: Get the AbstractBlock parent element of this node + attr_reader :parent def initialize parent, context, opts = {} # document is a special case, should refer to itself if context == :document - @document = parent - else - if parent - @parent = parent - @document = parent.document - else - @parent = nil - @document = nil - end - end - @context = context - @node_name = context.to_s - # QUESTION are we correct in duplicating the attributes (seems to be just as fast) - @attributes = (opts.key? :attributes) ? opts[:attributes].dup : {} - @passthroughs = {} + @document = self + elsif parent + @document = (@parent = parent).document + end + @node_name = (@context = context).to_s + # NOTE the value of the :attributes option may be nil on an Inline node + @attributes = (attrs = opts[:attributes]) ? attrs.merge : {} + @passthroughs = [] end - # Public: Associate this Block with a new parent Block - # - # parent - The Block to set as the parent of this Block + # Public: Returns whether this {AbstractNode} is an instance of {Block} # - # Returns nothing - def parent=(parent) - @parent = parent - @document = parent.document - nil + # Returns [Boolean] + def block? + # :nocov: + raise ::NotImplementedError + # :nocov: end # Public: Returns whether this {AbstractNode} is an instance of {Inline} @@ -65,78 +55,70 @@ # :nocov: end - # Public: Returns whether this {AbstractNode} is an instance of {Block} + # Public: Get the Asciidoctor::Converter instance being used to convert the + # current Asciidoctor::Document. + def converter + @document.converter + end + + # Public: Associate this Block with a new parent Block # - # Returns [Boolean] - def block? - # :nocov: - raise ::NotImplementedError - # :nocov: + # parent - The Block to set as the parent of this Block + # + # Returns the new parent Block associated with this Block + def parent= parent + @parent, @document = parent, parent.document end - # Public: Get the value of the specified attribute + # Public: Get the value of the specified attribute. If the attribute is not found on this node, fallback_name is set, + # and this node is not the Document node, get the value of the specified attribute from the Document node. # - # Get the value for the specified attribute. First look in the attributes on - # this node and return the value of the attribute if found. Otherwise, if - # this node is a child of the Document node, look in the attributes of the - # Document node and return the value of the attribute if found. Otherwise, - # return the default value, which defaults to nil. 
- # - # name - the String or Symbol name of the attribute to lookup - # default_value - the Object value to return if the attribute is not found (default: nil) - # inherit - a Boolean indicating whether to check for the attribute on the - # AsciiDoctor::Document if not found on this node (default: false) - # - # return the value of the attribute or the default value if the attribute - # is not found in the attributes of this node or the document node - def attr(name, default_value = nil, inherit = true) - name = name.to_s if ::Symbol === name - inherit = false if self == @document - if inherit - @attributes[name] || @document.attributes[name] || default_value - else - @attributes[name] || default_value - end + # Look for the specified attribute in the attributes on this node and return the value of the attribute, if found. + # Otherwise, if fallback_name is set (default: same as name) and this node is not the Document node, look for that + # attribute on the Document node and return its value, if found. Otherwise, return the default value (default: nil). + # + # name - The String or Symbol name of the attribute to resolve. + # default_value - The Object value to return if the attribute is not found (default: nil). + # fallback_name - The String or Symbol of the attribute to resolve on the Document if the attribute is not found on + # this node (default: same as name). + # + # Returns the [Object] value (typically a String) of the attribute or default_value if the attribute is not found. + def attr name, default_value = nil, fallback_name = nil + @attributes[name.to_s] || (fallback_name && @parent && @document.attributes[(fallback_name == true ? name : fallback_name).to_s] || default_value) end - # Public: Check if the attribute is defined, optionally performing a - # comparison of its value if expected is not nil - # - # Check if the attribute is defined. First look in the attributes on this - # node. If not found, and this node is a child of the Document node, look in - # the attributes of the Document node. If the attribute is found and a - # comparison value is specified (not nil), return whether the two values match. - # Otherwise, return whether the attribute was found. - # - # name - the String or Symbol name of the attribute to lookup - # expect - the expected Object value of the attribute (default: nil) - # inherit - a Boolean indicating whether to check for the attribute on the - # AsciiDoctor::Document if not found on this node (default: false) - # - # return a Boolean indicating whether the attribute exists and, if a - # comparison value is specified, whether the value of the attribute matches - # the comparison value - def attr?(name, expect = nil, inherit = true) - name = name.to_s if ::Symbol === name - inherit = false if self == @document - if expect.nil? - @attributes.has_key?(name) || (inherit && @document.attributes.has_key?(name)) - elsif inherit - expect == (@attributes[name] || @document.attributes[name]) + # Public: Check if the specified attribute is defined using the same logic as {#attr}, optionally performing a + # comparison with the expected value if specified. + # + # Look for the specified attribute in the attributes on this node. If not found, fallback_name is specified (default: + # same as name), and this node is not the Document node, look for that attribute on the Document node. In either case, + # if the attribute is found, and the comparison value is truthy, return whether the two values match. Otherwise, + # return whether the attribute was found. 
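A brief sketch of the fallback_name behavior described here; the icons attribute and the one-line document are arbitrary choices made for illustration.

  require 'asciidoctor'

  doc = Asciidoctor.load ":icons: font\n\nA paragraph."
  para = doc.blocks.first
  para.attr 'icons'                 # => nil (not set on the block itself)
  para.attr 'icons', nil, true      # => "font" (true falls back to the same name on the document)
  para.attr? 'icons', 'font', true  # => true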
+ # + # name - The String or Symbol name of the attribute to resolve. + # expected_value - The expected Object value of the attribute (default: nil). + # fallback_name - The String or Symbol of the attribute to resolve on the Document if the attribute is not found on + # this node (default: same as name). + # + # Returns a [Boolean] indicating whether the attribute exists and, if a truthy comparison value is specified, whether + # the value of the attribute matches the comparison value. + def attr? name, expected_value = nil, fallback_name = nil + if expected_value + expected_value == (@attributes[name.to_s] || (fallback_name && @parent ? @document.attributes[(fallback_name == true ? name : fallback_name).to_s] : nil)) else - expect == @attributes[name] + (@attributes.key? name.to_s) || (fallback_name && @parent ? (@document.attributes.key? (fallback_name == true ? name : fallback_name).to_s) : false) end end # Public: Assign the value to the attribute name for the current node. # # name - The String attribute name to assign - # value - The Object value to assign to the attribute + # value - The Object value to assign to the attribute (default: '') # overwrite - A Boolean indicating whether to assign the attribute # if currently present in the attributes Hash (default: true) # # Returns a [Boolean] indicating whether the assignment was performed - def set_attr name, value, overwrite = true + def set_attr name, value = '', overwrite = true if overwrite == false && (@attributes.key? name) false else @@ -145,117 +127,149 @@ end end - # TODO document me - def set_option(name) - if @attributes.has_key? 'options' - @attributes['options'] = "#{@attributes['options']},#{name}" - else - @attributes['options'] = name - end - @attributes["#{name}-option"] = '' + # Public: Remove the attribute from the current node. + # + # name - The String attribute name to remove + # + # Returns the previous [String] value, or nil if the attribute was not present. + def remove_attr name + @attributes.delete name end # Public: A convenience method to check if the specified option attribute is # enabled on the current node. # # Check if the option is enabled. This method simply checks to see if the - # %name%-option attribute is defined on the current node. + # <name>-option attribute is defined on the current node. # # name - the String or Symbol name of the option # # return a Boolean indicating whether the option has been specified - def option?(name) - @attributes.has_key? %(#{name}-option) + def option? name + @attributes[%(#{name}-option)] ? true : false + end + + # Public: Set the specified option on this node. + # + # This method sets the specified option on this node by setting the <name>-option attribute. + # + # name - the String name of the option + # + # Returns Nothing + def set_option name + @attributes[%(#{name}-option)] = '' + nil + end + + # Public: Retrieve the Set of option names that are enabled on this node + # + # Returns a [Set] of option names + def enabled_options + ::Set.new.tap {|accum| @attributes.each_key {|k| accum << (k.slice 0, k.length - 7) if k.to_s.end_with? '-option' } } end # Public: Update the attributes of this node with the new values in # the attributes argument. # # If an attribute already exists with the same key, it's value will - # be overridden. + # be overwritten. # - # attributes - A Hash of attributes to assign to this node. + # new_attributes - A Hash of additional attributes to assign to this node. 
# - # Returns nothing - def update_attributes(attributes) - @attributes.update(attributes) - nil + # Returns the updated attributes [Hash] on this node. + def update_attributes new_attributes + @attributes.update new_attributes end - # Public: Get the Asciidoctor::Converter instance being used to convert the - # current Asciidoctor::Document. - def converter - @document.converter + # Public: Retrieves the space-separated String role for this node. + # + # Returns the role as a space-separated [String]. + def role + @attributes['role'] end - # Public: A convenience method that checks if the role attribute is specified - def role?(expect = nil) - if expect - expect == (@attributes['role'] || @document.attributes['role']) - else - @attributes.has_key?('role') || @document.attributes.has_key?('role') - end + # Public: Retrieves the String role names for this node as an Array. + # + # Returns the role names as a String [Array], which is empty if the role attribute is absent on this node. + def roles + (val = @attributes['role']) ? val.split : [] end - # Public: A convenience method that returns the value of the role attribute - def role - @attributes['role'] || @document.attributes['role'] + # Public: Checks if the role attribute is set on this node and, if an expected value is given, whether the + # space-separated role matches that value. + # + # expected_value - The expected String value of the role (optional, default: nil) + # + # Returns a [Boolean] indicating whether the role attribute is set on this node and, if an expected value is given, + # whether the space-separated role matches that value. + def role? expected_value = nil + expected_value ? expected_value == @attributes['role'] : (@attributes.key? 'role') end - # Public: A convenience method that checks if the specified role is present - # in the list of roles on this node - def has_role?(name) - if (val = (@attributes['role'] || @document.attributes['role'])) - val.split(' ').include?(name) - else - false - end + # Public: Checks if the specified role is present in the list of roles for this node. + # + # name - The String name of the role to find. + # + # Returns a [Boolean] indicating whether this node has the specified role. + def has_role? name + # NOTE center + include? is faster than split + include? + (val = @attributes['role']) ? (%( #{val} ).include? %( #{name} )) : false end - # Public: A convenience method that returns the role names as an Array - def roles - if (val = (@attributes['role'] || @document.attributes['role'])) - val.split(' ') + # Public: Adds the given role directly to this node. + # + # Returns a [Boolean] indicating whether the role was added. + def add_role name + if (val = @attributes['role']) + # NOTE center + include? is faster than split + include? + if %( #{val} ).include? %( #{name} ) + false + else + @attributes['role'] = %(#{val} #{name}) + true + end else - [] + @attributes['role'] = name + true end end - # Public: A convenience method that adds the given role directly to this node - def add_role(name) - unless (roles = (@attributes['role'] || '').split(' ')).include? name - @attributes['role'] = roles.push(name) * ' ' + # Public: Removes the given role directly from this node. + # + # Returns a [Boolean] indicating whether the role was removed. + def remove_role name + if (val = @attributes['role']) && ((val = val.split).delete name) + if val.empty? 
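The role helpers can be exercised along these lines; the lead and summary role names are arbitrary, and the expected values assume the behavior documented above.

  require 'asciidoctor'

  para = Asciidoctor.load("[.lead]\nA paragraph with a role.").blocks.first
  para.role               # => "lead"
  para.has_role? 'lead'   # => true
  para.add_role 'summary' # => true (role becomes "lead summary")
  para.roles              # => ["lead", "summary"]
  para.remove_role 'lead' # => true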
+ @attributes.delete 'role' + else + @attributes['role'] = val.join ' ' + end + true + else + false end end - # Public: A convenience method that removes the given role directly from this node - def remove_role(name) - if (roles = (@attributes['role'] || '').split(' ')).include? name - roles.delete name - @attributes['role'] = roles * ' ' - end + # Public: A convenience method that returns the value of the reftext attribute with substitutions applied. + def reftext + (val = @attributes['reftext']) ? (apply_reftext_subs val) : nil end - # Public: A convenience method that checks if the reftext attribute is specified + # Public: A convenience method that checks if the reftext attribute is defined. def reftext? - @attributes.has_key?('reftext') || @document.attributes.has_key?('reftext') - end - - # Public: A convenience method that returns the value of the reftext attribute - def reftext - @attributes['reftext'] || @document.attributes['reftext'] + @attributes.key? 'reftext' end # Public: Construct a reference or data URI to an icon image for the # specified icon name. # # If the 'icon' attribute is set on this block, the name is ignored and the - # value of this attribute is used as the target image path. Otherwise, + # value of this attribute is used as the target image path. Otherwise, # construct a target image path by concatenating the value of the 'iconsdir' - # attribute, the icon name and the value of the 'icontype' attribute + # attribute, the icon name, and the value of the 'icontype' attribute # (defaulting to 'png'). # - # The target image path is then passed through the #image_uri() method. If + # The target image path is then passed through the #image_uri() method. If # the 'data-uri' attribute is set on the document, the image will be # safely converted to a data URI. # @@ -266,33 +280,13 @@ # Returns A String reference or data URI for an icon image def icon_uri name if attr? 'icon' - # QUESTION should we add extension if resolved value is an absolute URI? - if ::File.extname(uri = (image_uri attr('icon'), 'iconsdir')).empty? - %(#{uri}.#{@document.attr 'icontype', 'png'}) - else - uri - end + icon = attr 'icon' + # QUESTION should we be adding the extension if the icon is an absolute URI? + icon = %(#{icon}.#{@document.attr 'icontype', 'png'}) unless Helpers.extname? icon else - image_uri %(#{name}.#{@document.attr 'icontype', 'png'}), 'iconsdir' + icon = %(#{name}.#{@document.attr 'icontype', 'png'}) end - end - - # Public: Construct a URI reference to the target media. - # - # If the target media is a URI reference, then leave it untouched. - # - # The target media is resolved relative to the directory retrieved from the - # specified attribute key, if provided. - # - # The return value can be safely used in a media tag (img, audio, video). - # - # target - A String reference to the target media - # asset_dir_key - The String attribute key used to lookup the directory where - # the media is located (default: 'imagesdir') - # - # Returns A String reference for the target media - def media_uri(target, asset_dir_key = 'imagesdir') - normalize_web_path target, (asset_dir_key ? @document.attr(asset_dir_key) : nil) + image_uri icon, 'iconsdir' end # Public: Construct a URI reference or data URI to the target image. @@ -315,23 +309,37 @@ # # Returns A String reference or data URI for the target image def image_uri(target_image, asset_dir_key = 'imagesdir') - if (doc = @document).safe < SafeMode::SECURE && doc.attr?('data-uri') - if (Helpers.uriish? 
target_image) || - (asset_dir_key && (images_base = doc.attr(asset_dir_key)) && (Helpers.uriish? images_base) && - (target_image = normalize_web_path(target_image, images_base, false))) - if doc.attr?('allow-uri-read') - generate_data_uri_from_uri target_image, doc.attr?('cache-uri') - else - target_image - end + if (doc = @document).safe < SafeMode::SECURE && (doc.attr? 'data-uri') + if ((Helpers.uriish? target_image) && (target_image = Helpers.encode_spaces_in_uri target_image)) || + (asset_dir_key && (images_base = doc.attr asset_dir_key) && (Helpers.uriish? images_base) && + (target_image = normalize_web_path target_image, images_base, false)) + (doc.attr? 'allow-uri-read') ? (generate_data_uri_from_uri target_image, (doc.attr? 'cache-uri')) : target_image else generate_data_uri target_image, asset_dir_key end else - normalize_web_path target_image, (asset_dir_key ? doc.attr(asset_dir_key) : nil) + normalize_web_path target_image, (asset_dir_key ? (doc.attr asset_dir_key) : nil) end end + # Public: Construct a URI reference to the target media. + # + # If the target media is a URI reference, then leave it untouched. + # + # The target media is resolved relative to the directory retrieved from the + # specified attribute key, if provided. + # + # The return value can be safely used in a media tag (img, audio, video). + # + # target - A String reference to the target media + # asset_dir_key - The String attribute key used to lookup the directory where + # the media is located (default: 'imagesdir') + # + # Returns A String reference for the target media + def media_uri(target, asset_dir_key = 'imagesdir') + normalize_web_path target, (asset_dir_key ? @document.attr(asset_dir_key) : nil) + end + # Public: Generate a data URI that can be used to embed an image in the output document # # First, and foremost, the target image path is cleaned if the document safe mode level @@ -345,31 +353,27 @@ # # Returns A String data URI containing the content of the target image def generate_data_uri(target_image, asset_dir_key = nil) - ext = ::File.extname target_image - # QUESTION what if ext is empty? - mimetype = (ext == '.svg' ? 'image/svg+xml' : %(image/#{ext[1..-1]})) - if asset_dir_key - image_path = normalize_system_path(target_image, @document.attr(asset_dir_key), nil, :target_name => 'image') + if (ext = Helpers.extname target_image, nil) + mimetype = ext == '.svg' ? 'image/svg+xml' : %(image/#{ext.slice 1, ext.length}) else - image_path = normalize_system_path(target_image) + mimetype = 'application/octet-stream' end - unless ::File.readable? image_path - warn %(asciidoctor: WARNING: image to embed not found or not readable: #{image_path}) - # must enclose string following return in " for Opal - return "data:#{mimetype}:base64," - # uncomment to return 1 pixel white dot instead - #return 'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' + if asset_dir_key + image_path = normalize_system_path(target_image, @document.attr(asset_dir_key), nil, target_name: 'image') + else + image_path = normalize_system_path(target_image) end - bindata = nil - if ::IO.respond_to? :binread - bindata = ::IO.binread(image_path) + if ::File.readable? 
image_path + # NOTE base64 is autoloaded by reference to ::Base64 + %(data:#{mimetype};base64,#{::Base64.strict_encode64 ::File.binread image_path}) else - bindata = ::File.open(image_path, 'rb') {|file| file.read } + logger.warn %(image to embed not found or not readable: #{image_path}) + %(data:#{mimetype};base64,) + # uncomment to return 1 pixel white dot instead + #'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' end - # NOTE base64 is autoloaded by reference to ::Base64 - %(data:#{mimetype};base64,#{::Base64.encode64(bindata).delete EOL}) end # Public: Read the image data from the specified URI and generate a data URI @@ -389,21 +393,17 @@ # caching requires the open-uri-cached gem to be installed # processing will be automatically aborted if these libraries can't be opened Helpers.require_library 'open-uri/cached', 'open-uri-cached' - elsif !::RUBY_ENGINE_OPAL + elsif !RUBY_ENGINE_OPAL # autoload open-uri ::OpenURI end begin - mimetype = nil - bindata = open(image_uri, 'rb') {|file| - mimetype = file.content_type - file.read - } + mimetype, bindata = ::OpenURI.open_uri(image_uri, URI_READ_MODE) {|f| [f.content_type, f.read] } # NOTE base64 is autoloaded by reference to ::Base64 - %(data:#{mimetype};base64,#{::Base64.encode64(bindata).delete EOL}) + %(data:#{mimetype};base64,#{::Base64.strict_encode64 bindata}) rescue - warn %(asciidoctor: WARNING: could not retrieve image data from URI: #{image_uri}) + logger.warn %(could not retrieve image data from URI: #{image_uri}) image_uri # uncomment to return empty data (however, mimetype needs to be resolved) #%(data:#{mimetype}:base64,) @@ -412,90 +412,12 @@ end end - # Public: Resolve the URI or system path to the specified target, then read and return its contents - # - # The URI or system path of the target is first resolved. If the resolved path is a URI, read the - # contents from the URI if the allow-uri-read attribute is set, enabling caching if the cache-uri - # attribute is also set. If the resolved path is not a URI, read the contents of the file from the - # file system. If the normalize option is set, the data will be normalized. - # - # target - The URI or local path from which to read the data. - # opts - a Hash of options to control processing (default: {}) - # * :label the String label of the target to use in warning messages (default: 'asset') - # * :normalize a Boolean that indicates whether the data should be normalized (default: false) - # * :start the String relative base path to use when resolving the target (default: nil) - # * :warn_on_failure a Boolean that indicates whether warnings are issued if the target cannot be read (default: true) - # Returns the contents of the resolved target or nil if the resolved target cannot be read - # -- - # TODO refactor other methods in this class to use this method were possible (repurposing if necessary) - def read_contents target, opts = {} - doc = @document - if (Helpers.uriish? target) || ((start = opts[:start]) && (Helpers.uriish? start) && - (target = (@path_resolver ||= PathResolver.new).web_path target, start)) - if doc.attr? 'allow-uri-read' - Helpers.require_library 'open-uri/cached', 'open-uri-cached' if doc.attr? 
'cache-uri' - begin - data = ::OpenURI.open_uri(target) {|fd| fd.read } - data = (Helpers.normalize_lines_from_string data) * EOL if opts[:normalize] - rescue - warn %(asciidoctor: WARNING: could not retrieve contents of #{opts[:label] || 'asset'} at URI: #{target}) if opts.fetch :warn_on_failure, true - data = nil - end - else - warn %(asciidoctor: WARNING: cannot retrieve contents of #{opts[:label] || 'asset'} at URI: #{target} (allow-uri-read attribute not enabled)) if opts.fetch :warn_on_failure, true - data = nil - end - else - target = normalize_system_path target, opts[:start], nil, :target_name => (opts[:label] || 'asset') - data = read_asset target, :normalize => opts[:normalize], :warn_on_failure => (opts.fetch :warn_on_failure, true) - end - data - end - - # Public: Read the contents of the file at the specified path. - # This method assumes that the path is safe to read. It checks - # that the file is readable before attempting to read it. - # - # path - the String path from which to read the contents - # opts - a Hash of options to control processing (default: {}) - # * :warn_on_failure a Boolean that controls whether a warning - # is issued if the file cannot be read (default: false) - # * :normalize a Boolean that controls whether the lines - # are normalized and coerced to UTF-8 (default: false) - # - # Returns the [String] content of the file at the specified path, or nil - # if the file does not exist. - def read_asset(path, opts = {}) - # remap opts for backwards compatibility - opts = { :warn_on_failure => (opts != false) } unless ::Hash === opts - if ::File.readable? path - if opts[:normalize] - Helpers.normalize_lines_from_string(::IO.read(path)) * EOL - else - # QUESTION should we chomp or rstrip content? - ::IO.read(path) - end - else - warn %(asciidoctor: WARNING: file does not exist or cannot be read: #{path}) if opts[:warn_on_failure] - nil - end - end - - # Public: Normalize the web page using the PathResolver. - # - # See {PathResolver#web_path} for details. - # - # target - the String target path - # start - the String start (i.e, parent) path (optional, default: nil) - # preserve_uri_target - a Boolean indicating whether target should be preserved if contains a URI (default: true) + # Public: Normalize the asset file or directory to a concrete and rinsed path # - # Returns the resolved [String] path - def normalize_web_path(target, start = nil, preserve_uri_target = true) - if preserve_uri_target && (Helpers.uriish? target) - target - else - (@path_resolver ||= PathResolver.new).web_path target, start - end + # Delegates to normalize_system_path, with the start path set to the value of + # the base_dir instance variable on the Document object. 
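Since normalize_asset_path simply delegates to normalize_system_path with the document's base_dir as the start path, a hedged sketch of the underlying call might look like this; the safe mode, base_dir, and image path are assumptions made only for the example.

  require 'asciidoctor'

  doc = Asciidoctor.load 'content', safe: :safe, base_dir: Dir.pwd
  block = doc.blocks.first
  # resolves relative to base_dir and is confined to it in SAFE mode and above
  block.normalize_system_path 'images/diagram.png'
  # => File.join(Dir.pwd, 'images/diagram.png')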
+ def normalize_asset_path(asset_ref, asset_name = 'path', autocorrect = true) + normalize_system_path(asset_ref, @document.base_dir, nil, target_name: asset_name, recover: autocorrect) end # Public: Resolve and normalize a secure path from the target and start paths @@ -513,8 +435,8 @@ # start - the String start (i.e., parent) path # jail - the String jail path to confine the resolved path # opts - an optional Hash of options to control processing (default: {}): - # * :recover is used to control whether the processor should auto-recover - # when an illegal path is encountered + # * :recover is used to control whether the processor should + # automatically recover when an illegal path is encountered # * :target_name is used in messages to refer to the path being resolved # # raises a SecurityError if a jail is specified and the resolved path is @@ -524,10 +446,9 @@ # parent references resolved and self references removed. If a jail is provided, # this path will be guaranteed to be contained within the jail. def normalize_system_path target, start = nil, jail = nil, opts = {} - path_resolver = (@path_resolver ||= PathResolver.new) if (doc = @document).safe < SafeMode::SAFE if start - start = ::File.join doc.base_dir, start unless path_resolver.is_root? start + start = ::File.join doc.base_dir, start unless doc.path_resolver.root? start else start = doc.base_dir end @@ -535,26 +456,98 @@ start = doc.base_dir unless start jail = doc.base_dir unless jail end - path_resolver.system_path target, start, jail, opts + doc.path_resolver.system_path target, start, jail, opts end - # Public: Normalize the asset file or directory to a concrete and rinsed path + # Public: Normalize the web path using the PathResolver. # - # Delegates to normalize_system_path, with the start path set to the value of - # the base_dir instance variable on the Document object. - def normalize_asset_path(asset_ref, asset_name = 'path', autocorrect = true) - normalize_system_path(asset_ref, @document.base_dir, nil, - :target_name => asset_name, :recover => autocorrect) + # See {PathResolver#web_path} for details about path resolution and encoding. + # + # target - the String target path + # start - the String start (i.e, parent) path (optional, default: nil) + # preserve_uri_target - a Boolean indicating whether target should be preserved if contains a URI (default: true) + # + # Returns the resolved [String] path + def normalize_web_path(target, start = nil, preserve_uri_target = true) + if preserve_uri_target && (Helpers.uriish? target) + Helpers.encode_spaces_in_uri target + else + @document.path_resolver.web_path target, start + end + end + + # Public: Read the contents of the file at the specified path. + # This method assumes that the path is safe to read. It checks + # that the file is readable before attempting to read it. + # + # path - the String path from which to read the contents + # opts - a Hash of options to control processing (default: {}) + # * :warn_on_failure a Boolean that controls whether a warning + # is issued if the file cannot be read (default: false) + # * :normalize a Boolean that controls whether the lines + # are normalized and coerced to UTF-8 (default: false) + # + # Returns the [String] content of the file at the specified path, or nil + # if the file does not exist. + def read_asset path, opts = {} + # remap opts for backwards compatibility + opts = { warn_on_failure: (opts != false) } unless ::Hash === opts + if ::File.readable? 
path + # QUESTION should we chomp content if normalize is false? + opts[:normalize] ? ((Helpers.prepare_source_string ::File.read path, mode: FILE_READ_MODE).join LF) : (::File.read path, mode: FILE_READ_MODE) + elsif opts[:warn_on_failure] + logger.warn %(#{(attr 'docfile') || '<stdin>'}: #{opts[:label] || 'file'} does not exist or cannot be read: #{path}) + nil + end end - # Public: Calculate the relative path to this absolute filename from the Document#base_dir - #def relative_path(filename) - # (@path_resolver ||= PathResolver.new).relative_path filename, @document.base_dir - #end + # Public: Resolve the URI or system path to the specified target, then read and return its contents + # + # The URI or system path of the target is first resolved. If the resolved path is a URI, read the + # contents from the URI if the allow-uri-read attribute is set, enabling caching if the cache-uri + # attribute is also set. If the resolved path is not a URI, read the contents of the file from the + # file system. If the normalize option is set, the data will be normalized. + # + # target - The URI or local path from which to read the data. + # opts - a Hash of options to control processing (default: {}) + # * :label the String label of the target to use in warning messages (default: 'asset') + # * :normalize a Boolean that indicates whether the data should be normalized (default: false) + # * :start the String relative base path to use when resolving the target (default: nil) + # * :warn_on_failure a Boolean that indicates whether warnings are issued if the target cannot be read (default: true) + # Returns the contents of the resolved target or nil if the resolved target cannot be read + # -- + # TODO refactor other methods in this class to use this method were possible (repurposing if necessary) + def read_contents target, opts = {} + doc = @document + if (Helpers.uriish? target) || ((start = opts[:start]) && (Helpers.uriish? start) && + (target = doc.path_resolver.web_path target, start)) + if doc.attr? 'allow-uri-read' + Helpers.require_library 'open-uri/cached', 'open-uri-cached' if doc.attr? 'cache-uri' + begin + if opts[:normalize] + (Helpers.prepare_source_string ::OpenURI.open_uri(target, URI_READ_MODE) {|f| f.read }).join LF + else + ::OpenURI.open_uri(target, URI_READ_MODE) {|f| f.read } + end + rescue + logger.warn %(could not retrieve contents of #{opts[:label] || 'asset'} at URI: #{target}) if opts.fetch :warn_on_failure, true + return + end + else + logger.warn %(cannot retrieve contents of #{opts[:label] || 'asset'} at URI: #{target} (allow-uri-read attribute not enabled)) if opts.fetch :warn_on_failure, true + return + end + else + target = normalize_system_path target, opts[:start], nil, target_name: (opts[:label] || 'asset') + read_asset target, normalize: opts[:normalize], warn_on_failure: (opts.fetch :warn_on_failure, true), label: opts[:label] + end + end - # Public: Check whether the specified String is a URI by + # Deprecated: Check whether the specified String is a URI by # matching it against the Asciidoctor::UriSniffRx regex. # + # In use by Asciidoctor PDF + # # @deprecated Use Helpers.uriish? instead def is_uri? str Helpers.uriish? 
str diff -Nru asciidoctor-1.5.5/lib/asciidoctor/attribute_list.rb asciidoctor-2.0.10/lib/asciidoctor/attribute_list.rb --- asciidoctor-1.5.5/lib/asciidoctor/attribute_list.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/attribute_list.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,60 +1,51 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Handles parsing AsciiDoc attribute lists into a Hash of key/value # pairs. By default, attributes must each be separated by a comma and quotes # may be used around the value. If a key is not detected, the value is assigned # to a 1-based positional key, The positional attributes can be "rekeyed" when -# given a posattrs array either during parsing or after the fact. +# given a positional_attrs array either during parsing or after the fact. # # Examples # # attrlist = Asciidoctor::AttributeList.new('astyle') # # attrlist.parse -# => {0 => 'astyle'} +# => { 0 => 'astyle' } # # attrlist.rekey(['style']) -# => {'style' => 'astyle'} +# => { 'style' => 'astyle' } # # attrlist = Asciidoctor::AttributeList.new('quote, Famous Person, Famous Book (2001)') # # attrlist.parse(['style', 'attribution', 'citetitle']) -# => {'style' => 'quote', 'attribution' => 'Famous Person', 'citetitle' => 'Famous Book (2001)'} +# => { 'style' => 'quote', 'attribution' => 'Famous Person', 'citetitle' => 'Famous Book (2001)' } # class AttributeList - - # FIXME Opal not inheriting constants from parent scope - # NOTE can't use ::RUBY_ENGINE_OPAL here either - if ::RUBY_ENGINE == 'opal' - CG_BLANK = '[ \\t]' - CC_WORD = 'a-zA-Z0-9_' - CG_WORD = '[a-zA-Z0-9_]' - end + BACKSLASH = '\\' + APOS = '\'' # Public: Regular expressions for detecting the boundary of a value BoundaryRxs = { '"' => /.*?[^\\](?=")/, - '\'' => /.*?[^\\](?=')/, - ',' => /.*?(?=#{CG_BLANK}*(,|$))/ + APOS => /.*?[^\\](?=')/, + ',' => /.*?(?=[ \t]*(,|$))/ } # Public: Regular expressions for unescaping quoted characters - EscapedQuoteRxs = { - '"' => /\\"/, - '\'' => /\\'/ + EscapedQuotes = { + '"' => '\\"', + APOS => '\\\'' } # Public: A regular expression for an attribute name (approx. name token from XML) # TODO named attributes cannot contain dash characters NameRx = /#{CG_WORD}[#{CC_WORD}\-.]*/ - BlankRx = /#{CG_BLANK}+/ + BlankRx = /[ \t]+/ - # Public: Regular expressions for skipping blanks and delimiters - SkipRxs = { - :blank => BlankRx, - ',' => /#{CG_BLANK}*(,|$)/ - } + # Public: Regular expressions for skipping delimiters + SkipRxs = { ',' => /[ \t]*(,|$)/ } def initialize source, block = nil, delimiter = ',' @scanner = ::StringScanner.new source @@ -65,11 +56,11 @@ @attributes = nil end - def parse_into attributes, posattrs = [] - attributes.update(parse posattrs) + def parse_into attributes, positional_attrs = [] + attributes.update parse positional_attrs end - def parse posattrs = [] + def parse positional_attrs = [] # return if already parsed return @attributes if @attributes @@ -78,7 +69,7 @@ #attributes[0] = @scanner.string index = 0 - while parse_attribute index, posattrs + while parse_attribute index, positional_attrs break if @scanner.eos? 
skip_delimiter index += 1 @@ -87,24 +78,25 @@ @attributes end - def rekey posattrs - AttributeList.rekey @attributes, posattrs + def rekey positional_attrs + AttributeList.rekey @attributes, positional_attrs end - def self.rekey attributes, pos_attrs - pos_attrs.each_with_index do |key, index| - next unless key - pos = index + 1 - if (val = attributes[pos]) + def self.rekey attributes, positional_attrs + index = 0 + positional_attrs.each do |key| + index += 1 + if (val = attributes[index]) # QUESTION should we delete the positional key? attributes[key] = val - end + end if key end - attributes end - def parse_attribute index = 0, pos_attrs = [] + private + + def parse_attribute index = 0, positional_attrs = [] single_quoted_value = false skip_blank # example: "quote" @@ -112,10 +104,10 @@ name = parse_attribute_value @scanner.get_byte value = nil # example: 'quote' - elsif first == '\'' + elsif first == APOS name = parse_attribute_value @scanner.get_byte value = nil - single_quoted_value = true + single_quoted_value = true unless name.start_with? APOS else name = scan_name @@ -142,12 +134,12 @@ if (c = @scanner.get_byte) == '"' value = parse_attribute_value c # example: foo='bar' || foo='ba\'zaar' || foo='ba"zaar' - elsif c == '\'' + elsif c == APOS value = parse_attribute_value c - single_quoted_value = true + single_quoted_value = true unless value.start_with? APOS # example: foo=, elsif c == @delimiter - value = nil + value = '' # example: foo=bar (all spaces ignored) else value = %(#{c}#{scan_to_delimiter}) @@ -162,18 +154,28 @@ # opts is an alias for options case name when 'options', 'opts' - name = 'options' - value.tr(' ', '').split(',').each {|opt| @attributes[%(#{opt}-option)] = '' } - @attributes[name] = value - when 'title' - @attributes[name] = value + if value.include? ',' + value = value.delete ' ' if value.include? ' ' + (value.split ',').each {|opt| @attributes[%(#{opt}-option)] = '' unless opt.empty? } + else + @attributes[%(#{value}-option)] = '' unless value.empty? + end else - @attributes[name] = single_quoted_value && !value.empty? && @block ? (@block.apply_normal_subs value) : value + if single_quoted_value && @block + case name + when 'title', 'reftext' + @attributes[name] = value + else + @attributes[name] = @block.apply_subs value + end + else + @attributes[name] = value + end end else - resolved_name = single_quoted_value && !name.empty? && @block ? (@block.apply_normal_subs name) : name - if (pos_name = pos_attrs[index]) - @attributes[pos_name] = resolved_name + resolved_name = single_quoted_value && @block ? (@block.apply_subs name) : name + if (positional_attr_name = positional_attrs[index]) + @attributes[positional_attr_name] = resolved_name end # QUESTION should we always assign the positional key? @attributes[index + 1] = resolved_name @@ -193,7 +195,11 @@ if (value = scan_to_quote quote) @scanner.get_byte - value.gsub EscapedQuoteRxs[quote], quote + if value.include? 
BACKSLASH + value.gsub EscapedQuotes[quote], quote + else + value + end else %(#{quote}#{scan_to_delimiter}) end @@ -218,6 +224,5 @@ def scan_to_quote quote @scanner.scan BoundaryRxs[quote] end - end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/block.rb asciidoctor-2.0.10/lib/asciidoctor/block.rb --- asciidoctor-1.5.5/lib/asciidoctor/block.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/block.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,30 +1,30 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor -# Public: Methods for managing blocks of Asciidoc content in a section. +# Public: Methods for managing AsciiDoc content blocks. # # Examples # -# block = Asciidoctor::Block.new(parent, :paragraph, :source => '_This_ is a <test>') +# block = Asciidoctor::Block.new(parent, :paragraph, source: '_This_ is a <test>') # block.content # => "<em>This</em> is a <test>" class Block < AbstractBlock (DEFAULT_CONTENT_MODEL = { # TODO should probably fill in all known blocks - :audio => :empty, - :image => :empty, - :listing => :verbatim, - :literal => :verbatim, - :stem => :raw, - :open => :compound, - :page_break => :empty, - :pass => :raw, - :thematic_break => :empty, - :video => :empty + audio: :empty, + image: :empty, + listing: :verbatim, + literal: :verbatim, + stem: :raw, + open: :compound, + page_break: :empty, + pass: :raw, + thematic_break: :empty, + video: :empty, }).default = :simple # Public: Create alias for context to be consistent w/ AsciiDoc - alias :blockname :context + alias blockname context # Public: Get/Set the original Array content for this block, if applicable attr_accessor :lines @@ -40,10 +40,10 @@ # * :source a String or Array of raw source for this Block. (default: nil) # # IMPORTANT: If you don't specify the `:subs` option, you must explicitly call - # the `lock_in_subs` method to resolve and assign the substitutions to this + # the `commit_subs` method to resolve and assign the substitutions to this # block (which are resolved from the `subs` attribute, if specified, or the # default substitutions based on this block's context). If you want to use the - # default subs for a block, pass the option `:subs => :default`. You can + # default subs for a block, pass the option `subs: :default`. You can # override the default subs using the `:default_subs` option. #-- # QUESTION should we store source_data as lines for blocks that have compound content models? @@ -51,19 +51,19 @@ super @content_model = opts[:content_model] || DEFAULT_CONTENT_MODEL[context] if opts.key? 
:subs - # FIXME feels funky; we have to be defensive to get lock_in_subs to honor override + # FIXME feels funky; we have to be defensive to get commit_subs to honor override # FIXME does not resolve substitution groups inside Array (e.g., [:normal]) if (subs = opts[:subs]) - # e.g., :subs => :defult + # e.g., subs: :defult # subs attribute is honored; falls back to opts[:default_subs], then built-in defaults based on context if subs == :default @default_subs = opts[:default_subs] - # e.g., :subs => [:quotes] + # e.g., subs: [:quotes] # subs attribute is not honored elsif ::Array === subs - @default_subs = subs.dup + @default_subs = subs.drop 0 @attributes.delete 'subs' - # e.g., :subs => :normal or :subs => 'normal' + # e.g., subs: :normal or subs: 'normal' # subs attribute is not honored else @default_subs = nil @@ -71,26 +71,27 @@ @attributes['subs'] = %(#{subs}) end # resolve the subs eagerly only if subs option is specified - lock_in_subs - # e.g., :subs => nil + # QUESTION should we skip subsequent calls to commit_subs? + commit_subs + # e.g., subs: nil else - @subs = [] + # NOTE @subs is initialized as empty array by super constructor # prevent subs from being resolved @default_subs = [] @attributes.delete 'subs' end # defer subs resolution; subs attribute is honored else - @subs = [] + # NOTE @subs is initialized as empty array by super constructor # QUESTION should we honor :default_subs option (i.e., @default_subs = opts[:default_subs])? @default_subs = nil end if (raw_source = opts[:source]).nil_or_empty? @lines = [] elsif ::String === raw_source - @lines = Helpers.normalize_lines_from_string raw_source + @lines = Helpers.prepare_source_string raw_source else - @lines = raw_source.dup + @lines = raw_source.drop 0 end end @@ -101,7 +102,7 @@ # # doc = Asciidoctor::Document.new # block = Asciidoctor::Block.new(doc, :paragraph, - # :source => '_This_ is what happens when you <meet> a stranger in the <alps>!') + # source: '_This_ is what happens when you <meet> a stranger in the <alps>!') # block.content # => "<em>This</em> is what happens when you <meet> a stranger in the <alps>!" def content @@ -109,10 +110,8 @@ when :compound super when :simple - apply_subs(@lines * EOL, @subs) + apply_subs((@lines.join LF), @subs) when :verbatim, :raw - #((apply_subs @lines.join(EOL), @subs).sub StripLineWiseRx, '\1') - # QUESTION could we use strip here instead of popping empty lines? # maybe apply_subs can know how to strip whitespace? result = apply_subs @lines, @subs @@ -121,20 +120,20 @@ else result.shift while (first = result[0]) && first.rstrip.empty? result.pop while (last = result[-1]) && last.rstrip.empty? 
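Editorial aside, not part of the diff: the Block constructor documented just above accepts keyword-style options. A minimal sketch, assuming Asciidoctor 2.x is installed and that subs: :default resolves the paragraph's normal substitutions at construction time:

require 'asciidoctor'

doc = Asciidoctor::Document.new
# With subs: :default, the substitutions are resolved eagerly, so there is no
# separate commit_subs call before asking for the converted content.
block = Asciidoctor::Block.new doc, :paragraph,
  source: '_This_ is a <test>', subs: :default
puts block.content # expect roughly "<em>This</em> is a &lt;test&gt;"
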
- result * EOL + result.join LF end else - warn %(Unknown content model '#{@content_model}' for block: #{to_s}) unless @content_model == :empty + logger.warn %(Unknown content model '#{@content_model}' for block: #{to_s}) unless @content_model == :empty nil end end # Public: Returns the preprocessed source of this block # - # Returns the a String containing the lines joined together or nil if there - # are no lines + # Returns the a String containing the lines joined together or empty string + # if there are no lines def source - @lines * EOL + @lines.join LF end def to_s diff -Nru asciidoctor-1.5.5/lib/asciidoctor/callouts.rb asciidoctor-2.0.10/lib/asciidoctor/callouts.rb --- asciidoctor-1.5.5/lib/asciidoctor/callouts.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/callouts.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Maintains a catalog of callouts and their associations. class Callouts @@ -28,7 +28,7 @@ # # Returns The unique String id of this callout def register li_ordinal - current_list << { :ordinal => li_ordinal.to_i, :id => (id = generate_next_callout_id) } + current_list << { ordinal: li_ordinal.to_i, id: (id = generate_next_callout_id) } @co_index += 1 id end @@ -59,7 +59,7 @@ # # Returns A space-separated String of callout ids associated with the specified list item def callout_ids li_ordinal - current_list.map {|element| element[:ordinal] == li_ordinal ? %(#{element[:id]} ) : nil }.join.chop + current_list.map {|it| it[:ordinal] == li_ordinal ? %(#{it[:id]} ) : '' }.join.chop end # Public: The current list for which callouts are being collected @@ -93,6 +93,8 @@ nil end + private + # Internal: Generate a unique id for the callout based on the internal indexes # # Returns A unique String id for this callout diff -Nru asciidoctor-1.5.5/lib/asciidoctor/cli/invoker.rb asciidoctor-2.0.10/lib/asciidoctor/cli/invoker.rb --- asciidoctor-1.5.5/lib/asciidoctor/cli/invoker.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/cli/invoker.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,13 +1,15 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor module Cli # Public Invocation class for starting Asciidoctor via CLI class Invoker + include Logging + attr_reader :options attr_reader :documents attr_reader :code - def initialize(*options) + def initialize *options @documents = [] @out = nil @err = nil @@ -29,101 +31,134 @@ end def invoke! - old_verbose = -1 return unless @options - old_verbose = $VERBOSE - case @options[:verbose] - when 0 - $VERBOSE = nil - when 1 - $VERBOSE = false - when 2 - $VERBOSE = true - end - + old_logger = old_logger_level = nil + old_verbose, $VERBOSE = $VERBOSE, @options[:warnings] opts = {} infiles = [] outfile = nil - tofile = nil + abs_srcdir_posix = nil + non_posix_env = ::File::ALT_SEPARATOR == RS + err = @err || $stderr + show_timings = false + @options.map do |key, val| case key when :input_files infiles = val when :output_file outfile = val + when :source_dir + if val + abs_srcdir_posix = ::File.expand_path val + abs_srcdir_posix = abs_srcdir_posix.tr RS, FS if non_posix_env && (abs_srcdir_posix.include? 
RS) + end when :destination_dir opts[:to_dir] = val if val when :attributes # NOTE processor will dup attributes internally opts[:attributes] = val + when :timings + show_timings = val when :trace - # currently, nothing + # no assignment + when :verbose + case val + when 0 + $VERBOSE = nil + old_logger, LoggerManager.logger = logger, NullLogger.new + when 2 + old_logger_level, logger.level = logger.level, ::Logger::Severity::DEBUG + end else opts[key] = val unless val.nil? end end - if infiles.size == 1 && infiles[0] == '-' - # allows use of block to supply stdin, particularly useful for tests - inputs = [block_given? ? yield : STDIN] - else - inputs = infiles.map {|infile| ::File.new infile, 'r'} + if infiles.size == 1 + if (infile0 = infiles[0]) == '-' + outfile ||= infile0 + stdin = true + elsif ::File.pipe? infile0 + outfile ||= '-' + end end - # NOTE if infile is stdin, default to outfile as stout - if outfile == '-' || (!outfile && infiles.size == 1 && infiles[0] == '-') - tofile = (@out || $stdout) + if outfile == '-' + # NOTE set_encoding returns nil on JRuby 9.1 + (tofile = @out) || ((tofile = $stdout).set_encoding UTF_8) elsif outfile - tofile = outfile opts[:mkdirs] = true + tofile = outfile else - # automatically calculate outfile based on infile unless to_dir is set - tofile = nil opts[:mkdirs] = true + # automatically calculate outfile based on infile end - show_timings = @options[:timings] - inputs.each do |input| - # NOTE processor will dup options and attributes internally - input_opts = tofile.nil? ? opts : opts.merge(:to_file => tofile) + if stdin + # allows use of block to supply stdin, particularly useful for tests + # NOTE set_encoding returns nil on JRuby 9.1 + block_given? ? (input = yield) : ((input = $stdin).set_encoding UTF_8, UTF_8) + input_opts = opts.merge to_file: tofile if show_timings - timings = Timings.new - @documents << ::Asciidoctor.convert(input, input_opts.merge(:timings => timings)) - timings.print_report((@err || $stderr), ((input.respond_to? :path) ? input.path : '-')) + @documents << (::Asciidoctor.convert input, (input_opts.merge timings: (timings = Timings.new))) + timings.print_report err, '-' else - @documents << ::Asciidoctor.convert(input, input_opts) + @documents << (::Asciidoctor.convert input, input_opts) + end + else + infiles.each do |infile| + input_opts = opts.merge to_file: tofile + if abs_srcdir_posix && (input_opts.key? :to_dir) + abs_indir = ::File.dirname ::File.expand_path infile + if non_posix_env + abs_indir_posix = (abs_indir.include? RS) ? (abs_indir.tr RS, FS) : abs_indir + else + abs_indir_posix = abs_indir + end + if abs_indir_posix.start_with? %(#{abs_srcdir_posix}/) + input_opts[:to_dir] += abs_indir.slice abs_srcdir_posix.length, abs_indir.length + end + end + if show_timings + @documents << (::Asciidoctor.convert_file infile, (input_opts.merge timings: (timings = Timings.new))) + timings.print_report err, infile + else + @documents << (::Asciidoctor.convert_file infile, input_opts) + end end end + @code = 1 if (logger.respond_to? :max_severity) && logger.max_severity && logger.max_severity >= opts[:failure_level] rescue ::Exception => e if ::SignalException === e @code = e.signo - # add extra endline if Ctrl+C is used - (@err || $stderr).puts if ::Interrupt === e + # add extra newline if Ctrl+C is used + err.puts if ::Interrupt === e else @code = (e.respond_to? :status) ? 
e.status : 1 if @options[:trace] raise e else - err = (@err || $stderr) - if ::RuntimeError === e - err.puts %(#{e.message} (#{e.class})) - else - err.puts e.message - end + err.puts ::RuntimeError === e ? %(#{e.message} (#{e.class})) : e.message err.puts ' Use --trace for backtrace' end end nil ensure - $VERBOSE = old_verbose unless old_verbose == -1 + $VERBOSE = old_verbose + if old_logger + LoggerManager.logger = old_logger + elsif old_logger_level + logger.level = old_logger_level + end end def document @documents[0] end - def redirect_streams(out, err = nil) + def redirect_streams out, err = nil @out = out @err = err end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/cli/options.rb asciidoctor-2.0.10/lib/asciidoctor/cli/options.rb --- asciidoctor-1.5.5/lib/asciidoctor/cli/options.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/cli/options.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,30 +1,31 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor module Cli + FS = ?/ + RS = ?\\ # Public: List of options that can be specified on the command line class Options < ::Hash def initialize(options = {}) self[:attributes] = options[:attributes] || {} - self[:input_files] = options[:input_files] || nil - self[:output_file] = options[:output_file] || nil + self[:input_files] = options[:input_files] + self[:output_file] = options[:output_file] self[:safe] = options[:safe] || SafeMode::UNSAFE - self[:header_footer] = options[:header_footer] || true - self[:template_dirs] = options[:template_dirs] || nil - self[:template_engine] = options[:template_engine] || nil - if options[:doctype] - self[:attributes]['doctype'] = options[:doctype] - end - if options[:backend] - self[:attributes]['backend'] = options[:backend] - end - self[:eruby] = options[:eruby] || nil - self[:verbose] = options[:verbose] || 1 - self[:load_paths] = options[:load_paths] || nil - self[:requires] = options[:requires] || nil + self[:standalone] = options.fetch :standalone, true + self[:template_dirs] = options[:template_dirs] + self[:template_engine] = options[:template_engine] + self[:attributes]['doctype'] = options[:doctype] if options[:doctype] + self[:attributes]['backend'] = options[:backend] if options[:backend] + self[:eruby] = options[:eruby] + self[:verbose] = options.fetch :verbose, 1 + self[:warnings] = options.fetch :warnings, false + self[:load_paths] = options[:load_paths] + self[:requires] = options[:requires] self[:base_dir] = options[:base_dir] - self[:destination_dir] = options[:destination_dir] || nil + self[:source_dir] = options[:source_dir] + self[:destination_dir] = options[:destination_dir] + self[:failure_level] = ::Logger::Severity::FATAL self[:trace] = false self[:timings] = false end @@ -35,15 +36,16 @@ def parse!(args) opts_parser = ::OptionParser.new do |opts| - opts.banner = <<-EOS -Usage: asciidoctor [OPTION]... FILE... -Translate the AsciiDoc source FILE or FILE(s) into the backend output format (e.g., HTML 5, DocBook 4.5, etc.) -By default, the output is written to a file with the basename of the source file and the appropriate extension. -Example: asciidoctor -b html5 source.asciidoc + # NOTE don't use squiggly heredoc to maintain compatibility with Ruby < 2.3 + opts.banner = <<-'EOS'.gsub ' ', '' + Usage: asciidoctor [OPTION]... FILE... + Translate the AsciiDoc source FILE or FILE(s) into the backend output format (e.g., HTML 5, DocBook 5, etc.) 
+ By default, the output is written to a file with the basename of the source file and the appropriate extension. + Example: asciidoctor -b html5 source.asciidoc EOS - opts.on('-b', '--backend BACKEND', 'set output format backend: [html5, xhtml5, docbook5, docbook45, manpage] (default: html5)', + opts.on('-b', '--backend BACKEND', 'set output format backend: [html5, xhtml5, docbook5, manpage] (default: html5)', 'additional backends are supported via extensions (e.g., pdf, latex)') do |backend| self[:attributes]['backend'] = backend end @@ -51,49 +53,47 @@ 'document type to use when converting document: [article, book, manpage, inline] (default: article)') do |doc_type| self[:attributes]['doctype'] = doc_type end + opts.on('-e', '--embedded', 'suppress enclosing document structure and output an embedded document (default: false)') do + self[:standalone] = false + end opts.on('-o', '--out-file FILE', 'output file (default: based on path of input file); use - to output to STDOUT') do |output_file| self[:output_file] = output_file end opts.on('--safe', 'set safe mode level to safe (default: unsafe)', - 'enables include macros, but restricts access to ancestor paths of source file', + 'enables include directives, but prevents access to ancestor paths of source file', 'provided for compatibility with the asciidoc command') do self[:safe] = SafeMode::SAFE end - opts.on('-S', '--safe-mode SAFE_MODE', (safe_mode_names = SafeMode.constants.map(&:to_s).map(&:downcase)), - %(set safe mode level explicitly: [#{safe_mode_names * ', '}] (default: unsafe)), - 'disables potentially dangerous macros in source files, such as include::[]') do |safe_mode| - self[:safe] = SafeMode.const_get safe_mode.upcase + opts.on('-S', '--safe-mode SAFE_MODE', (safe_mode_names = SafeMode.names), + %(set safe mode level explicitly: [#{safe_mode_names.join ', '}] (default: unsafe)), + 'disables potentially dangerous macros in source files, such as include::[]') do |name| + self[:safe] = SafeMode.value_for_name name end - opts.on('-s', '--no-header-footer', 'suppress output of header and footer (default: false)') do - self[:header_footer] = false + opts.on('-s', '--no-header-footer', 'suppress enclosing document structure and output an embedded document (default: false)') do + self[:standalone] = false end opts.on('-n', '--section-numbers', 'auto-number section titles in the HTML backend; disabled by default') do self[:attributes]['sectnums'] = '' end - opts.on('-e', '--eruby ERUBY', ['erb', 'erubis'], + opts.on('--eruby ERUBY', ['erb', 'erubis'], 'specify eRuby implementation to use when rendering custom ERB templates: [erb, erubis] (default: erb)') do |eruby| self[:eruby] = eruby end - opts.on('-C', '--compact', 'compact the output by removing blank lines. (No longer in use)') do - end - opts.on('-a', '--attribute key[=value]', 'a document attribute to set in the form of key, key! or key=value pair', - 'unless @ is appended to the value, this attributes takes precedence over attributes', - 'defined in the source document') do |attr| - key, val = attr.split '=', 2 - val = val ? (FORCE_ENCODING ? (val.force_encoding ::Encoding::UTF_8) : val) : '' - # move leading ! to end for internal processing - #if !val && key.start_with?('!') - # key = "#{key[1..-1]}!" 
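Editorial sketch, not part of the patch: the CLI examples in the banner and the -a option above have straightforward API counterparts; the file names and attribute values below are placeholders.

require 'asciidoctor'

# Roughly `asciidoctor -b html5 source.asciidoc` (unsafe is the CLI's default safe mode).
Asciidoctor.convert_file 'source.asciidoc', backend: 'html5', safe: :unsafe

# Roughly `asciidoctor -a imagesdir=images -a toc source.asciidoc`.
# Appending @ to a value (e.g. 'images@') would let the document's own
# attribute entry take precedence instead, per the -a help text above.
Asciidoctor.convert_file 'source.asciidoc', safe: :unsafe,
  attributes: { 'imagesdir' => 'images', 'toc' => '' }
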
- #end - self[:attributes][key] = val + opts.on('-a', '--attribute name[=value]', 'a document attribute to set in the form of name, name!, or name=value pair', + 'this attribute takes precedence over the same attribute defined in the source document', + 'unless either the name or value ends in @ (i.e., name@=value or name=value@)') do |attr| + next if (attr = attr.rstrip).empty? || attr == '=' + attr = attr.encode UTF_8 unless attr.encoding == UTF_8 + name, _, val = attr.partition '=' + self[:attributes][name] = val end opts.on('-T', '--template-dir DIR', 'a directory containing custom converter templates that override the built-in converter (requires tilt gem)', 'may be specified multiple times') do |template_dir| if self[:template_dirs].nil? self[:template_dirs] = [template_dir] elsif ::Array === self[:template_dirs] - self[:template_dirs].push template_dir + self[:template_dirs] << template_dir else self[:template_dirs] = [self[:template_dirs], template_dir] end @@ -104,6 +104,9 @@ opts.on('-B', '--base-dir DIR', 'base directory containing the document and resources (default: directory of source file)') do |base_dir| self[:base_dir] = base_dir end + opts.on('-R', '--source-dir DIR', 'source root directory (used for calculating path in destination directory)') do |src_dir| + self[:source_dir] = src_dir + end opts.on('-D', '--destination-dir DIR', 'destination output directory (default: directory of source file)') do |dest_dir| self[:destination_dir] = dest_dir end @@ -115,35 +118,82 @@ 'may be specified more than once') do |path| (self[:requires] ||= []).concat(path.split ',') end - opts.on('-q', '--quiet', 'suppress warnings (default: false)') do |verbose| + opts.on('--failure-level LEVEL', %w(warning WARNING error ERROR info INFO), 'set minimum logging level that triggers non-zero exit code: [WARN, ERROR, INFO] (default: FATAL)') do |level| + level = 'WARN' if (level = level.upcase) == 'WARNING' + self[:failure_level] = ::Logger::Severity.const_get level, false + end + opts.on('-q', '--quiet', 'silence application log messages and script warnings (default: false)') do |verbose| self[:verbose] = 0 end - opts.on('--trace', 'include backtrace information on errors (default: false)') do |trace| + opts.on('--trace', 'include backtrace information when reporting errors (default: false)') do |trace| self[:trace] = true end opts.on('-v', '--verbose', 'enable verbose mode (default: false)') do |verbose| self[:verbose] = 2 end - opts.on('-t', '--timings', 'enable timings mode (default: false)') do |timing| + opts.on('-w', '--warnings', 'turn on script warnings (default: false)') do |warnings| + self[:warnings] = true + end + opts.on('-t', '--timings', 'print timings report (default: false)') do |timing| self[:timings] = true end - - opts.on_tail('-h', '--help', 'show this message') do - $stdout.puts opts + opts.on_tail('-h', '--help [TOPIC]', 'print a help message', + 'show this usage if TOPIC is not specified or recognized', + 'show an overview of the AsciiDoc syntax if TOPIC is syntax', + 'dump the Asciidoctor man page (in troff/groff format) if TOPIC is manpage') do |topic| + case topic + # use `asciidoctor -h manpage | man -l -` to view with man pager + when 'manpage' + if (manpage_path = ::ENV['ASCIIDOCTOR_MANPAGE_PATH']) + if ::File.exist? manpage_path + if manpage_path.end_with? '.gz' + require 'zlib' unless defined? 
::Zlib::GzipReader + $stdout.puts ::Zlib::GzipReader.open(manpage_path) {|gz| gz.read } + else + $stdout.puts ::File.read manpage_path + end + else + $stderr.puts %(asciidoctor: FAILED: manual page not found: #{manpage_path}) + return 1 + end + # Ruby 2.3 requires the extra brackets around the ::File.join method call + elsif ::File.exist? (manpage_path = (::File.join ROOT_DIR, 'man', 'asciidoctor.1')) + $stdout.puts ::File.read manpage_path + else + manpage_path = `man -w asciidoctor`.chop rescue '' + if manpage_path.empty? + $stderr.puts 'asciidoctor: FAILED: manual page not found; try `man asciidoctor`' + return 1 + elsif manpage_path.end_with? '.gz' + require 'zlib' unless defined? ::Zlib::GzipReader + $stdout.puts ::Zlib::GzipReader.open(manpage_path) {|gz| gz.read } + else + $stdout.puts ::File.read manpage_path + end + end + when 'syntax' + # Ruby 2.3 requires the extra brackets around the ::File.join method call + if ::File.exist? (syntax_path = (::File.join ROOT_DIR, 'data', 'reference', 'syntax.adoc')) + $stdout.puts ::File.read syntax_path + else + $stderr.puts 'asciidoctor: FAILED: syntax page not found; visit https://asciidoctor.org/docs' + return 1 + end + else + $stdout.puts opts + end return 0 end - opts.on_tail('-V', '--version', 'display the version and runtime environment (or -v if no other flags or arguments)') do return print_version $stdout end - end - infiles = [] + old_verbose, $VERBOSE = $VERBOSE, (args.include? '-w') opts_parser.parse! args if args.empty? - if self[:verbose] == 2 + if self[:verbose] == 2 # -v flag was specified return print_version $stdout else $stderr.puts opts_parser @@ -151,51 +201,58 @@ end end + infiles = [] # shave off the file to process so that options errors appear correctly if args.size == 1 && args[0] == '-' - infiles.push args.pop + infiles << args.pop elsif args.each do |file| - if file == '-' || (file.start_with? '-') + if file.start_with? '-' # warn, but don't panic; we may have enough to proceed, so we won't force a failure - $stderr.puts "asciidoctor: WARNING: extra arguments detected (unparsed arguments: #{args.map{|a| "'#{a}'"} * ', '}) or incorrect usage of stdin" + $stderr.puts %(asciidoctor: WARNING: extra arguments detected (unparsed arguments: '#{args.join "', '"}') or incorrect usage of stdin) + elsif ::File.file? file + infiles << file + # NOTE only attempt to glob if file is not found else - if ::File.readable? file - matches = [file] + # Tilt backslashes in Windows paths the Ruby-friendly way + if ::File::ALT_SEPARATOR == RS && (file.include? RS) + file = file.tr RS, FS + end + if (matches = ::Dir.glob file).empty? + # NOTE if no matches, assume it's just a missing file and proceed + infiles << file else - # Tilt backslashes in Windows paths the Ruby-friendly way - if ::File::ALT_SEPARATOR == '\\' && (file.include? '\\') - file = file.tr '\\', '/' - end - if (matches = ::Dir.glob file).empty? - $stderr.puts %(asciidoctor: FAILED: input file #{file} missing or cannot be read) - return 1 - end + infiles.concat matches end - - infiles.concat matches end end end - infiles.each do |file| - unless file == '-' || (::File.file? file) - if ::File.readable? file - $stderr.puts %(asciidoctor: FAILED: input path #{file} is a #{(::File.stat file).ftype}, not a file) + infiles.reject {|file| file == '-' }.each do |file| + begin + fstat = ::File.stat file + if fstat.file? || fstat.pipe? + unless fstat.readable? 
+ $stderr.puts %(asciidoctor: FAILED: input file #{file} is not readable) + return 1 + end else - $stderr.puts %(asciidoctor: FAILED: input file #{file} missing or cannot be read) + $stderr.puts %(asciidoctor: FAILED: input path #{file} is a #{fstat.ftype}, not a file) + return 1 end + rescue ::Errno::ENOENT + $stderr.puts %(asciidoctor: FAILED: input file #{file} is missing) return 1 end end self[:input_files] = infiles - self.delete(:attributes) if self[:attributes].empty? + self.delete :attributes if self[:attributes].empty? if self[:template_dirs] begin - require 'tilt' unless defined? ::Tilt + require 'tilt' unless defined? ::Tilt.new rescue ::LoadError raise $! if self[:trace] $stderr.puts 'asciidoctor: FAILED: \'tilt\' could not be loaded' @@ -208,13 +265,13 @@ end if (load_paths = self[:load_paths]) - (self[:load_paths] = load_paths.uniq).reverse_each do |path| - $:.unshift File.expand_path(path) - end + load_paths.uniq! + load_paths.reverse_each {|path| $:.unshift ::File.expand_path path } end if (requires = self[:requires]) - (self[:requires] = requires.uniq).each do |path| + requires.uniq! + requires.each do |path| begin require path rescue ::LoadError @@ -237,18 +294,16 @@ $stderr.puts %(asciidoctor: #{$!.message}) $stdout.puts opts_parser return 1 + ensure + $VERBOSE = old_verbose end def print_version os = $stdout - os.puts %(Asciidoctor #{::Asciidoctor::VERSION} [http://asciidoctor.org]) - if RUBY_VERSION >= '1.9.3' - encoding_info = {'lc' => 'locale', 'fs' => 'filesystem', 'in' => 'internal', 'ex' => 'external'}.map do |k,v| - %(#{k}:#{::Encoding.find(v) || '-'}) - end - os.puts %(Runtime Environment (#{RUBY_DESCRIPTION}) (#{encoding_info * ' '})) - else - os.puts %(Runtime Environment (#{RUBY_DESCRIPTION})) + os.puts %(Asciidoctor #{::Asciidoctor::VERSION} [https://asciidoctor.org]) + encoding_info = { 'lc' => 'locale', 'fs' => 'filesystem', 'in' => 'internal', 'ex' => 'external' }.map do |k, v| + %(#{k}:#{v == 'internal' ? (::File.open(__FILE__) {|f| f.getc.encoding }) : (::Encoding.find v)}) end + os.puts %(Runtime Environment (#{::RUBY_DESCRIPTION}) (#{encoding_info.join ' '})) 0 end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/cli.rb asciidoctor-2.0.10/lib/asciidoctor/cli.rb --- asciidoctor-1.5.5/lib/asciidoctor/cli.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/cli.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,3 +1,4 @@ +# frozen_string_literal: true require 'optparse' -require 'asciidoctor/cli/options' -require 'asciidoctor/cli/invoker' +require_relative 'cli/options' +require_relative 'cli/invoker' diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/base.rb asciidoctor-2.0.10/lib/asciidoctor/converter/base.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/base.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/base.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -# encoding: UTF-8 -module Asciidoctor - module Converter; end # required for Opal - - # An abstract base class for defining converters that can be used to convert - # {AbstractNode} objects in a parsed AsciiDoc document to a backend format - # such as HTML or DocBook. - # - # Concrete subclasses must implement the {#convert} method and, optionally, - # the {#convert_with_options} method. - class Converter::Base - include Converter - end - - # An abstract base class for built-in {Converter} classes. 
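Editorial note, not part of the diff: the tilt probe above guards the -T/--template-dir feature; the API-side equivalent is the :template_dirs option (the directory and engine names below are placeholders).

require 'asciidoctor'

# Custom converter templates, as enabled by -T on the CLI; this needs the tilt
# gem, which is why the options parser checks for ::Tilt above.
Asciidoctor.convert_file 'doc.adoc', safe: :safe,
  template_dirs: ['my-templates'], template_engine: 'slim'
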
- class Converter::BuiltIn - def initialize backend, opts = {} - end - - # Public: Converts the specified {AbstractNode} using the specified - # transform and optionally additional options (when not empty). - # - # CAUTION: Method that handles the specified transform *may not* accept the - # second argument with additional options, in which case an {ArgumentError} - # is raised if the given +opts+ Hash is not nil. The additional options are - # used in template-based backends to access convert helper methods such as - # outline. - # - # See {Converter#convert} for more details. - # - # Returns the [String] result of conversion - def convert node, transform = nil, opts = {} - transform ||= node.node_name - opts.empty? ? (send transform, node) : (send transform, node, opts) - end - - alias :handles? :respond_to? - - # Public: Returns the converted content of the {AbstractNode}. - # - # Returns the converted [String] content of the {AbstractNode}. - def content node - node.content - end - - alias :pass :content - - # Public: Skips conversion of the {AbstractNode}. - # - # Returns [NilClass] - def skip node - nil - end - end -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/composite.rb asciidoctor-2.0.10/lib/asciidoctor/converter/composite.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/composite.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/composite.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,55 +1,47 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor - # A {Converter} implementation that delegates to the chain of {Converter} - # objects passed to the constructor. Selects the first {Converter} that - # identifies itself as the handler for a given transform. - class Converter::CompositeConverter < Converter::Base - - # Get the Array of Converter objects in the chain - attr_reader :converters - - def initialize backend, *converters - @backend = backend - (@converters = converters.flatten.compact).each do |converter| - converter.composed self if converter.respond_to? :composed - end - @converter_map = {} - end - - # Public: Delegates to the first converter that identifies itself as the - # handler for the given transform. The optional Hash is passed as the last - # option to the delegate's convert method. - # - # node - the AbstractNode to convert - # transform - the optional String transform, or the name of the node if no - # transform is specified. (default: nil) - # opts - an optional Hash that is passed to the delegate's convert method. (default: {}) - # - # Returns the String result returned from the delegate's convert method - def convert node, transform = nil, opts = {} - transform ||= node.node_name - (converter_for transform).convert node, transform, opts - end +# A {Converter} implementation that delegates to the chain of {Converter} +# objects passed to the constructor. Selects the first {Converter} that +# identifies itself as the handler for a given transform. +class Converter::CompositeConverter < Converter::Base + # Get the Array of Converter objects in the chain + attr_reader :converters + + def initialize backend, *converters, backend_traits_source: nil + @backend = backend + (@converters = converters).each {|converter| converter.composed self if converter.respond_to? :composed } + init_backend_traits backend_traits_source.backend_traits if backend_traits_source + @converter_cache = ::Hash.new {|hash, key| hash[key] = find_converter key } + end - # Alias for backward compatibility. 
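Editorial sketch, not part of the diff: with the old Converter::BuiltIn base removed, a user-defined converter is written directly against the Converter API. The class name, backend name, and output format below are invented for illustration; this follows the general registration pattern rather than any specific converter in this patch.

require 'asciidoctor'

# A toy converter registered for a made-up 'text' backend. register_for and the
# convert(node, transform, opts) dispatch come from the Converter module.
class TextConverter
  include Asciidoctor::Converter
  register_for 'text'

  def convert node, transform = node.node_name, opts = nil
    case transform
    when 'document', 'section'
      [node.title, node.content].join %(\n\n)
    when 'paragraph'
      %(#{node.content}\n)
    else
      (transform.start_with? 'inline_') ? node.text : node.content
    end
  end
end

puts Asciidoctor.convert '*Hello*, converters.', backend: 'text'
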
- alias :convert_with_options :convert + # Public: Delegates to the first converter that identifies itself as the + # handler for the given transform. The optional Hash is passed as the last + # option to the delegate's convert method. + # + # node - the AbstractNode to convert + # transform - the optional String transform, or the name of the node if no + # transform is specified. (default: nil) + # opts - an optional Hash that is passed to the delegate's convert method. (default: nil) + # + # Returns the String result returned from the delegate's convert method + def convert node, transform = nil, opts = nil + (converter_for transform ||= node.node_name).convert node, transform, opts + end - # Public: Retrieve the converter for the specified transform. - # - # Returns the matching [Converter] object - def converter_for transform - @converter_map[transform] ||= (find_converter transform) - end + # Public: Retrieve the converter for the specified transform. + # + # Returns the matching [Converter] object + def converter_for transform + @converter_cache[transform] + end - # Internal: Find the converter for the specified transform. - # Raise an exception if no converter is found. - # - # Returns the matching [Converter] object - def find_converter transform - @converters.each do |candidate| - return candidate if candidate.handles? transform - end - raise %(Could not find a converter to handle transform: #{transform}) - end + # Public: Find the converter for the specified transform. + # Raise an exception if no converter is found. + # + # Returns the matching [Converter] object + def find_converter transform + @converters.each {|candidate| return candidate if candidate.handles? transform } + raise %(Could not find a converter to handle transform: #{transform}) end end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/docbook45.rb asciidoctor-2.0.10/lib/asciidoctor/converter/docbook45.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/docbook45.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/docbook45.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -# encoding: UTF-8 -require 'asciidoctor/converter/docbook5' - -module Asciidoctor - # A built-in {Converter} implementation that generates DocBook 4.5 output - # consistent with the docbook45 backend from AsciiDoc Python. - class Converter::DocBook45Converter < Converter::DocBook5Converter - def admonition node - # address a bug in the DocBook 4.5 DTD - if node.parent.context == :example - %(<para> -#{super} -</para>) - else - super - end - end - - def olist node - result = [] - num_attribute = node.style ? %( numeration="#{node.style}") : nil - start_attribute = (node.attr? 'start') ? %( override="#{node.attr 'start'}") : nil - result << %(<orderedlist#{common_attributes node.id, node.role, node.reftext}#{num_attribute}>) - result << %(<title>#{node.title}) if node.title? - node.items.each_with_index do |item, idx| - result << (idx == 0 ? %() : '') - result << %(#{item.text}) - result << item.content if item.blocks? - result << '' - end - result << %() - result * EOL - end - - def inline_anchor node - case node.type - when :ref - %() - when :xref - if (path = node.attributes['path']) - # QUESTION should we use refid as fallback text instead? (like the html5 backend?) - %(#{node.text || path}) - else - linkend = node.attributes['fragment'] || node.target - (text = node.text) ? 
%(#{text}) : %() - end - when :link - %(#{node.text}) - when :bibref - target = node.target - %([#{target}]) - end - end - - def author_element doc, index = nil - firstname_key = index ? %(firstname_#{index}) : 'firstname' - middlename_key = index ? %(middlename_#{index}) : 'middlename' - lastname_key = index ? %(lastname_#{index}) : 'lastname' - email_key = index ? %(email_#{index}) : 'email' - - result = [] - result << '' - result << %(#{doc.attr firstname_key}) if doc.attr? firstname_key - result << %(#{doc.attr middlename_key}) if doc.attr? middlename_key - result << %(#{doc.attr lastname_key}) if doc.attr? lastname_key - result << %(#{doc.attr email_key}) if doc.attr? email_key - result << '' - - result * EOL - end - - def common_attributes id, role = nil, reftext = nil - res = id ? %( id="#{id}") : '' - res = %(#{res} role="#{role}") if role - res = %(#{res} xreflabel="#{reftext}") if reftext - res - end - - def doctype_declaration root_tag_name - %() - end - - def document_info_element doc, info_tag_prefix - super doc, info_tag_prefix, true - end - - def lang_attribute_name - 'lang' - end - - def document_ns_attributes doc - if (ns = doc.attr 'xmlns') - ns.empty? ? ' xmlns="http://docbook.org/ns/docbook"' : %( xmlns="#{ns}") - else - nil - end - end - end -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/docbook5.rb asciidoctor-2.0.10/lib/asciidoctor/converter/docbook5.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/docbook5.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/docbook5.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,753 +1,783 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor - # A built-in {Converter} implementation that generates DocBook 5 output - # similar to the docbook45 backend from AsciiDoc Python, but migrated to the - # DocBook 5 specification. - class Converter::DocBook5Converter < Converter::BuiltIn - def document node - result = [] - if (root_tag_name = node.doctype) == 'manpage' - root_tag_name = 'refentry' - end - result << '' - if (doctype_line = doctype_declaration root_tag_name) - result << doctype_line - end - if node.attr? 'toc' - if node.attr? 'toclevels' - result << %() - else - result << '' - end - end - if node.attr? 'sectnums' - if node.attr? 'sectnumlevels' - result << %() - else - result << '' - end - end - lang_attribute = (node.attr? 'nolang') ? nil : %( #{lang_attribute_name}="#{node.attr 'lang', 'en'}") - result << %(<#{root_tag_name}#{document_ns_attributes node}#{lang_attribute}>) - result << (document_info_element node, root_tag_name) - result << node.content if node.blocks? - unless (footer_docinfo = node.docinfo :footer).empty? - result << footer_docinfo - end - result << %() - - result * EOL - end - - alias :embedded :content - - def section node - doctype = node.document.doctype - if node.special - if (tag_name = node.sectname).start_with? 'sect' - # a normal child section of a special section - tag_name = 'section' - end - else - tag_name = doctype == 'book' && node.level <= 1 ? (node.level == 0 ? 'part' : 'chapter') : 'section' - end - if doctype == 'manpage' - if tag_name == 'section' - tag_name = 'refsection' - elsif tag_name == 'synopsis' - tag_name = 'refsynopsisdiv' - end - end - %(<#{tag_name}#{common_attributes node.id, node.role, node.reftext}> -#{node.title} -#{node.content} -) +# A built-in {Converter} implementation that generates DocBook 5 output. 
The output is inspired by the output produced +# by the docbook45 backend from AsciiDoc Python, except it has been migrated to the DocBook 5 specification. +class Converter::DocBook5Converter < Converter::Base + register_for 'docbook5' + + # default represents variablelist + (DLIST_TAGS = { + 'qanda' => { list: 'qandaset', entry: 'qandaentry', label: 'question', term: 'simpara', item: 'answer' }, + 'glossary' => { list: nil, entry: 'glossentry', term: 'glossterm', item: 'glossdef' }, + }).default = { list: 'variablelist', entry: 'varlistentry', term: 'term', item: 'listitem' } + + (QUOTE_TAGS = { + monospaced: ['', ''], + emphasis: ['', '', true], + strong: ['', '', true], + double: ['', '', true], + single: ['', '', true], + mark: ['', ''], + superscript: ['', ''], + subscript: ['', ''], + }).default = ['', '', true] + + MANPAGE_SECTION_TAGS = { 'section' => 'refsection', 'synopsis' => 'refsynopsisdiv' } + TABLE_PI_NAMES = ['dbhtml', 'dbfo', 'dblatex'] + + CopyrightRx = /^(#{CC_ANY}+?)(?: ((?:\d{4}\-)?\d{4}))?$/ + ImageMacroRx = /^image::?(\S|\S#{CC_ANY}*?\S)\[(#{CC_ANY}+)?\]$/ + + def initialize backend, opts = {} + @backend = backend + init_backend_traits basebackend: 'docbook', filetype: 'xml', outfilesuffix: '.xml', supports_templates: true + end + + def convert_document node + result = [''] + result << ((node.attr? 'toclevels') ? %() : '') if node.attr? 'toc' + result << ((node.attr? 'sectnumlevels') ? %() : '') if node.attr? 'sectnums' + lang_attribute = (node.attr? 'nolang') ? '' : %( xml:lang="#{node.attr 'lang', 'en'}") + if (root_tag_name = node.doctype) == 'manpage' + root_tag_name = 'refentry' + end + result << %(<#{root_tag_name} xmlns="http://docbook.org/ns/docbook" xmlns:xl="http://www.w3.org/1999/xlink" version="5.0"#{lang_attribute}#{common_attributes node.id}>) + result << (document_info_tag node) unless node.noheader + unless (docinfo_content = node.docinfo :header).empty? + result << docinfo_content + end + result << node.content if node.blocks? + unless (docinfo_content = node.docinfo :footer).empty? + result << docinfo_content end + result << %() + result.join LF + end + + alias convert_embedded content_only - def admonition node - %(<#{tag_name = node.attr 'name'}#{common_attributes node.id, node.role, node.reftext}> -#{title_tag node}#{resolve_content node} + def convert_section node + if node.document.doctype == 'manpage' + tag_name = MANPAGE_SECTION_TAGS[tag_name = node.sectname] || tag_name + else + tag_name = node.sectname + end + title_el = node.special && (node.option? 'untitled') ? '' : %(#{node.title}\n) + %(<#{tag_name}#{common_attributes node.id, node.role, node.reftext}> +#{title_el}#{node.content} ) - end + end - alias :audio :skip + def convert_admonition node + %(<#{tag_name = node.attr 'name'}#{common_attributes node.id, node.role, node.reftext}> +#{title_tag node}#{enclose_content node} +) + end - def colist node - result = [] - result << %() - result << %(#{node.title}) if node.title? - node.items.each do |item| - result << %() - result << %(#{item.text}) - result << item.content if item.blocks? - result << '' - end - result << %() - result * EOL + alias convert_audio skip + + def convert_colist node + result = [] + result << %() + result << %(#{node.title}) if node.title? + node.items.each do |item| + result << %() + result << %(#{item.text}) + result << item.content if item.blocks? 
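Editorial illustration, not part of the diff: the converter above is what the docbook5 backend resolves to. From the API, a conversion looks roughly like this (the input file name is a placeholder); per init_backend_traits, the default output suffix is .xml.

require 'asciidoctor'

# Writes doc.xml next to doc.adoc using the DocBook 5 converter shown above.
Asciidoctor.convert_file 'doc.adoc', backend: 'docbook5', safe: :safe
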
+ result << '' end + result << %() + result.join LF + end - (DLIST_TAGS = { - 'labeled' => { - :list => 'variablelist', - :entry => 'varlistentry', - :term => 'term', - :item => 'listitem' - }, - 'qanda' => { - :list => 'qandaset', - :entry => 'qandaentry', - :label => 'question', - :term => 'simpara', - :item => 'answer' - }, - 'glossary' => { - :list => nil, - :entry => 'glossentry', - :term => 'glossterm', - :item => 'glossdef' - } - }).default = { # default value == DLIST['labeled'], expanded for Opal - :list => 'variablelist', - :entry => 'varlistentry', - :term => 'term', - :item => 'listitem' - } - - def dlist node - result = [] - if node.style == 'horizontal' - result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext} tabstyle="horizontal" frame="none" colsep="0" rowsep="0"> + def convert_dlist node + result = [] + if node.style == 'horizontal' + result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext} tabstyle="horizontal" frame="none" colsep="0" rowsep="0"> #{title_tag node} ) - node.items.each do |terms, dd| - result << %( + node.items.each do |terms, dd| + result << %( ) - [*terms].each do |dt| - result << %(#{dt.text}) - end - result << %( + terms.each {|dt| result << %(#{dt.text}) } + result << %( ) - unless dd.nil? - result << %(#{dd.text}) if dd.text? - result << dd.content if dd.blocks? - end - result << %( -) + if dd + result << %(#{dd.text}) if dd.text? + result << dd.content if dd.blocks? end - result << %( + result << %( +) + end + result << %( ) - else - tags = DLIST_TAGS[node.style] - list_tag = tags[:list] - entry_tag = tags[:entry] - label_tag = tags[:label] - term_tag = tags[:term] - item_tag = tags[:item] - if list_tag - result << %(<#{list_tag}#{common_attributes node.id, node.role, node.reftext}>) - result << %(#{node.title}) if node.title? - end - - node.items.each do |terms, dd| - result << %(<#{entry_tag}>) - result << %(<#{label_tag}>) if label_tag - - [*terms].each do |dt| - result << %(<#{term_tag}>#{dt.text}) - end + else + tags = DLIST_TAGS[node.style] + list_tag = tags[:list] + entry_tag = tags[:entry] + label_tag = tags[:label] + term_tag = tags[:term] + item_tag = tags[:item] + if list_tag + result << %(<#{list_tag}#{common_attributes node.id, node.role, node.reftext}>) + result << %(#{node.title}) if node.title? + end - result << %() if label_tag - result << %(<#{item_tag}>) - unless dd.nil? - result << %(#{dd.text}) if dd.text? - result << dd.content if dd.blocks? - end - result << %() - result << %() + node.items.each do |terms, dd| + result << %(<#{entry_tag}>) + result << %(<#{label_tag}>) if label_tag + terms.each {|dt| result << %(<#{term_tag}>#{dt.text}) } + result << %() if label_tag + result << %(<#{item_tag}>) + if dd + result << %(#{dd.text}) if dd.text? + result << dd.content if dd.blocks? end - - result << %() if list_tag + result << %() + result << %() end - result * EOL + result << %() if list_tag end - def example node - if node.title? - %( + result.join LF + end + + def convert_example node + if node.title? + %( #{node.title} -#{resolve_content node} +#{enclose_content node} ) - else - %( -#{resolve_content node} + else + %( +#{enclose_content node} ) - end end + end - def floating_title node - %(#{node.title}) - end + def convert_floating_title node + %(#{node.title}) + end - def image node - width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : nil - depth_attribute = (node.attr? 
'height') ? %( contentdepth="#{node.attr 'height'}") : nil - # FIXME if scaledwidth is set, we should remove width & depth - # See http://www.docbook.org/tdg/en/html/imagedata.html#d0e92271 for details - swidth_attribute = (node.attr? 'scaledwidth') ? %( width="#{node.attr 'scaledwidth'}" scalefit="1") : nil - scale_attribute = (node.attr? 'scale') ? %( scale="#{node.attr 'scale'}") : nil - align_attribute = (node.attr? 'align') ? %( align="#{node.attr 'align'}") : nil + def convert_image node + # NOTE according to the DocBook spec, content area, scaling, and scaling to fit are mutually exclusive + # See http://tdg.docbook.org/tdg/4.5/imagedata-x.html#d0e79635 + if node.attr? 'scaledwidth' + width_attribute = %( width="#{node.attr 'scaledwidth'}") + depth_attribute = '' + scale_attribute = '' + elsif node.attr? 'scale' + # QUESTION should we set the viewport using width and depth? (the scaled image would be contained within this box) + #width_attribute = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' + #depth_attribute = (node.attr? 'height') ? %( depth="#{node.attr 'height'}") : '' + scale_attribute = %( scale="#{node.attr 'scale'}") + else + width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : '' + depth_attribute = (node.attr? 'height') ? %( contentdepth="#{node.attr 'height'}") : '' + scale_attribute = '' + end + align_attribute = (node.attr? 'align') ? %( align="#{node.attr 'align'}") : '' - mediaobject = %( + mediaobject = %( - + -#{node.attr 'alt'} +#{node.alt} ) - if node.title? - %( + if node.title? + %( #{node.title} #{mediaobject} ) - else - %( + else + %( #{mediaobject} ) - end end + end - def listing node - informal = !node.title? - listing_attributes = (common_attributes node.id, node.role, node.reftext) - if node.style == 'source' && (node.attr? 'language') - numbering = (node.attr? 'linenums') ? 'numbered' : 'unnumbered' - listing_content = %(#{node.content}) + def convert_listing node + informal = !node.title? + common_attrs = common_attributes node.id, node.role, node.reftext + if node.style == 'source' + if (attrs = node.attributes).key? 'linenums' + numbering_attrs = (attrs.key? 'start') ? %( linenumbering="numbered" startinglinenumber="#{attrs['start'].to_i}") : ' linenumbering="numbered"' else - listing_content = %(#{node.content}) + numbering_attrs = ' linenumbering="unnumbered"' end - if informal - listing_content + if attrs.key? 'language' + wrapped_content = %(#{node.content}) else - %( + wrapped_content = %(#{node.content}) + end + else + wrapped_content = %(#{node.content}) + end + informal ? wrapped_content : %( #{node.title} -#{listing_content} +#{wrapped_content} ) - end - end + end - def literal node - if node.title? - %( + def convert_literal node + if node.title? + %( #{node.title} #{node.content} ) - else - %(#{node.content}) - end + else + %(#{node.content}) end + end - def stem node - if (idx = node.subs.index :specialcharacters) - node.subs.delete :specialcharacters - end - equation = node.content - node.subs.insert idx, :specialcharacters if idx - if node.style == 'asciimath' - if ((defined? ::AsciiMath) || ((defined? @asciimath_available) ? 
@asciimath_available : - (@asciimath_available = Helpers.require_library 'asciimath', true, :warn))) - # NOTE fop requires jeuclid to process raw mathml - equation_data = (::AsciiMath.parse equation).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML' - else - equation_data = %() - end - else - # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math - equation_data = %( + alias convert_pass content_only + + def convert_stem node + if (idx = node.subs.index :specialcharacters) + node.subs.delete_at idx + equation = node.content || '' + idx > 0 ? (node.subs.insert idx, :specialcharacters) : (node.subs.unshift :specialcharacters) + else + equation = node.content || '' + end + if node.style == 'asciimath' + # NOTE fop requires jeuclid to process mathml markup + equation_data = asciimath_available? ? ((::AsciiMath.parse equation).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML') : %() + else + # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math + equation_data = %( ) - end - if node.title? - %( + end + if node.title? + %( #{node.title} #{equation_data} ) - else - # WARNING dblatex displays the element inline instead of block as documented (except w/ mathml) - %( + else + # WARNING dblatex displays the element inline instead of block as documented (except w/ mathml) + %( #{equation_data} ) - end end + end - def olist node - result = [] - num_attribute = node.style ? %( numeration="#{node.style}") : nil - start_attribute = (node.attr? 'start') ? %( startingnumber="#{node.attr 'start'}") : nil - result << %() - result << %(#{node.title}) if node.title? - node.items.each do |item| - result << '' - result << %(#{item.text}) - result << item.content if item.blocks? - result << '' - end - result << %() - result * EOL + def convert_olist node + result = [] + num_attribute = node.style ? %( numeration="#{node.style}") : '' + start_attribute = (node.attr? 'start') ? %( startingnumber="#{node.attr 'start'}") : '' + result << %() + result << %(#{node.title}) if node.title? + node.items.each do |item| + result << %() + result << %(#{item.text}) + result << item.content if item.blocks? + result << '' end + result << %() + result.join LF + end - def open node - case node.style - when 'abstract' - if node.parent == node.document && node.document.attr?('doctype', 'book') - warn 'asciidoctor: WARNING: abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' - '' - else - %( -#{title_tag node}#{resolve_content node} + def convert_open node + case node.style + when 'abstract' + if node.parent == node.document && node.document.doctype == 'book' + logger.warn 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' + '' + else + %( +#{title_tag node}#{enclose_content node} ) - end - when 'partintro' - unless node.level == 0 && node.parent.context == :section && node.document.doctype == 'book' - warn 'asciidoctor: ERROR: partintro block can only be used when doctype is book and it\'s a child of a part section. Excluding block content.' - '' - else - %( -#{title_tag node}#{resolve_content node} + end + when 'partintro' + unless node.level == 0 && node.parent.context == :section && node.document.doctype == 'book' + logger.error 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' 
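Editorial note, not part of the diff: the warn/error calls above go through Asciidoctor's logging facility rather than Kernel#warn. A sketch of capturing them in memory, assuming MemoryLogger behaves as it does in Asciidoctor's own test helpers:

require 'asciidoctor'

# Swap in an in-memory logger so messages such as the abstract/partintro
# warnings above can be inspected after conversion (an assumption about the
# MemoryLogger API; adjust to taste).
Asciidoctor::LoggerManager.logger = (logger = Asciidoctor::MemoryLogger.new)
Asciidoctor.convert_file 'doc.adoc', backend: 'docbook5', safe: :safe
logger.messages.each {|msg| puts %(#{msg[:severity]}: #{msg[:message]}) }
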
+ '' + else + %( +#{title_tag node}#{enclose_content node} ) + end + else + reftext = node.reftext if (id = node.id) + role = node.role + if node.title? + %( +#{node.title} +#{content_spacer = node.content_model == :compound ? LF : ''}#{node.content}#{content_spacer} +) + elsif id || role + if node.content_model == :compound + %( +#{node.content} +) + else + %(#{node.content}) end else - node.content + enclose_content node end end + end - def page_break node - '' - end + def convert_page_break node + '' + end - def paragraph node - if node.title? - %( + def convert_paragraph node + if node.title? + %( #{node.title} #{node.content} ) - else - %(#{node.content}) - end + else + %(#{node.content}) end + end - def preamble node - if node.document.doctype == 'book' - %( + def convert_preamble node + if node.document.doctype == 'book' + %( #{title_tag node, false}#{node.content} ) - else - node.content - end + else + node.content end + end - def quote node - result = [] - result << %() - result << %(#{node.title}) if node.title? - if (node.attr? 'attribution') || (node.attr? 'citetitle') - result << '' - if node.attr? 'attribution' - result << (node.attr 'attribution') - end - if node.attr? 'citetitle' - result << %(#{node.attr 'citetitle'}) - end - result << '' - end - result << (resolve_content node) - result << '' - result * EOL - end + def convert_quote node + blockquote_tag(node, (node.has_role? 'epigraph') && 'epigraph') { enclose_content node } + end - def thematic_break node - '' - end + def convert_thematic_break node + '' + end - def sidebar node - %( -#{title_tag node}#{resolve_content node} + def convert_sidebar node + %( +#{title_tag node}#{enclose_content node} ) - end - - TABLE_PI_NAMES = ['dbhtml', 'dbfo', 'dblatex'] - TABLE_SECTIONS = [:head, :foot, :body] + end - def table node - has_body = false - result = [] - pgwide_attribute = (node.option? 'pgwide') ? ' pgwide="1"' : nil - result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext}#{pgwide_attribute} frame="#{node.attr 'frame', 'all'}" rowsep="#{['none', 'cols'].include?(node.attr 'grid') ? 0 : 1}" colsep="#{['none', 'rows'].include?(node.attr 'grid') ? 0 : 1}"#{(node.attr? 'orientation', 'landscape', nil) ? ' orient="land"' : nil}>) - if (node.option? 'unbreakable') - result << '' - elsif (node.option? 'breakable') - result << '' - end - result << %(#{node.title}) if tag_name == 'table' - col_width_key = if (width = (node.attr? 'width') ? (node.attr 'width') : nil) - TABLE_PI_NAMES.each do |pi_name| - result << %() - end - 'colabswidth' - else - 'colpcwidth' - end - result << %() - node.columns.each do |col| - result << %() - end - TABLE_SECTIONS.select {|tblsec| !node.rows[tblsec].empty? }.each do |tblsec| - has_body = true if tblsec == :body - result << %() - node.rows[tblsec].each do |row| - result << '' - row.each do |cell| - halign_attribute = (cell.attr? 'halign') ? %( align="#{cell.attr 'halign'}") : nil - valign_attribute = (cell.attr? 'valign') ? %( valign="#{cell.attr 'valign'}") : nil - colspan_attribute = cell.colspan ? %( namest="col_#{colnum = cell.column.attr 'colnumber'}" nameend="col_#{colnum + cell.colspan - 1}") : nil - rowspan_attribute = cell.rowspan ? 
%( morerows="#{cell.rowspan - 1}") : nil - # NOTE may not have whitespace (e.g., line breaks) as a direct descendant according to DocBook rules - entry_start = %() - cell_content = if tblsec == :head - cell.text + def convert_table node + has_body = false + result = [] + pgwide_attribute = (node.option? 'pgwide') ? ' pgwide="1"' : '' + if (frame = node.attr 'frame', 'all', 'table-frame') == 'ends' + frame = 'topbot' + end + grid = node.attr 'grid', nil, 'table-grid' + result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext}#{pgwide_attribute} frame="#{frame}" rowsep="#{['none', 'cols'].include?(grid) ? 0 : 1}" colsep="#{['none', 'rows'].include?(grid) ? 0 : 1}"#{(node.attr? 'orientation', 'landscape', 'table-orientation') ? ' orient="land"' : ''}>) + if (node.option? 'unbreakable') + result << '' + elsif (node.option? 'breakable') + result << '' + end + result << %(#{node.title}) if tag_name == 'table' + col_width_key = if (width = (node.attr? 'width') ? (node.attr 'width') : nil) + TABLE_PI_NAMES.each do |pi_name| + result << %() + end + 'colabswidth' + else + 'colpcwidth' + end + result << %() + node.columns.each do |col| + result << %() + end + node.rows.to_h.each do |tsec, rows| + next if rows.empty? + has_body = true if tsec == :body + result << %() + rows.each do |row| + result << '' + row.each do |cell| + halign_attribute = (cell.attr? 'halign') ? %( align="#{cell.attr 'halign'}") : '' + valign_attribute = (cell.attr? 'valign') ? %( valign="#{cell.attr 'valign'}") : '' + colspan_attribute = cell.colspan ? %( namest="col_#{colnum = cell.column.attr 'colnumber'}" nameend="col_#{colnum + cell.colspan - 1}") : '' + rowspan_attribute = cell.rowspan ? %( morerows="#{cell.rowspan - 1}") : '' + # NOTE may not have whitespace (e.g., line breaks) as a direct descendant according to DocBook rules + entry_start = %() + if tsec == :head + cell_content = cell.text + else + case cell.style + when :asciidoc + cell_content = cell.content + when :literal + cell_content = %(#{cell.text}) + when :header + cell_content = (cell_content = cell.content).empty? ? '' : %(#{cell_content.join ''}) else - case cell.style - when :asciidoc - cell.content - when :verse - %(#{cell.text}) - when :literal - %(#{cell.text}) - when :header - cell.content.map {|text| %(#{text}) }.join - else - cell.content.map {|text| %(#{text}) }.join - end + cell_content = (cell_content = cell.content).empty? ? '' : %(#{cell_content.join ''}) end - entry_end = (node.document.attr? 'cellbgcolor') ? %() : '' - result << %(#{entry_start}#{cell_content}#{entry_end}) end - result << '' + entry_end = (node.document.attr? 'cellbgcolor') ? %() : '' + result << %(#{entry_start}#{cell_content}#{entry_end}) end - result << %() + result << '' end - result << '' - result << %() - - warn 'asciidoctor: WARNING: tables must have at least one body row' unless has_body - result * EOL + result << %() end + result << '' + result << %() - alias :toc :skip - - def ulist node - result = [] - if node.style == 'bibliography' - result << %() - result << %(#{node.title}) if node.title? - node.items.each do |item| - result << '' - result << %(#{item.text}) - result << item.content if item.blocks? - result << '' - end - result << '' - else - mark_type = (checklist = node.option? 'checklist') ? 'none' : node.style - mark_attribute = mark_type ? %( mark="#{mark_type}") : nil - result << %() - result << %(#{node.title}) if node.title? 
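A small end-to-end check, added editorially and not part of the patch: feeding an arbitrary AsciiDoc table through the docbook5 backend exercises the convert_table logic above, including the frame=ends to topbot mapping.

require 'asciidoctor'

table_source = <<-'ADOC'
[frame=ends,grid=rows]
|===
|Backend |Output

|html5
|HTML 5

|docbook5
|DocBook 5
|===
ADOC

# Embedded conversion of just the table; expect an <informaltable> with
# frame="topbot", rowsep="1", colsep="0" given the attributes above.
puts Asciidoctor.convert table_source, backend: 'docbook5'
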
- node.items.each do |item| - text_marker = if checklist && (item.attr? 'checkbox') - (item.attr? 'checked') ? '✓ ' : '❏ ' - else - nil - end - result << '' - result << %(#{text_marker}#{item.text}) - result << item.content if item.blocks? - result << '' - end - result << '' - end + logger.warn 'tables must have at least one body row' unless has_body + result.join LF + end - result * EOL - end + alias convert_toc skip - def verse node - result = [] - result << %() + def convert_ulist node + result = [] + if node.style == 'bibliography' + result << %() result << %(#{node.title}) if node.title? - if (node.attr? 'attribution') || (node.attr? 'citetitle') - result << '' - if node.attr? 'attribution' - result << (node.attr 'attribution') - end - if node.attr? 'citetitle' - result << %(#{node.attr 'citetitle'}) - end - result << '' + node.items.each do |item| + result << '' + result << %(#{item.text}) + result << item.content if item.blocks? + result << '' end - result << %(#{node.content}) - result << '' - result * EOL - end - - alias :video :skip - - def inline_anchor node - case node.type - when :ref - %() - when :xref - if (path = node.attributes['path']) - # QUESTION should we use refid as fallback text instead? (like the html5 backend?) - %(#{node.text || path}) - else - linkend = node.attributes['fragment'] || node.target - (text = node.text) ? %(#{text}) : %() - end - when :link - %(#{node.text}) - when :bibref - target = node.target - %([#{target}]) - else - warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) + result << '' + else + mark_type = (checklist = node.option? 'checklist') ? 'none' : node.style + mark_attribute = mark_type ? %( mark="#{mark_type}") : '' + result << %() + result << %(#{node.title}) if node.title? + node.items.each do |item| + text_marker = (item.attr? 'checked') ? '✓ ' : '❏ ' if checklist && (item.attr? 'checkbox') + result << %() + result << %(#{text_marker || ''}#{item.text}) + result << item.content if item.blocks? + result << '' end + result << '' end + result.join LF + end - def inline_break node - %(#{node.text}) - end + def convert_verse node + blockquote_tag(node, (node.has_role? 'epigraph') && 'epigraph') { %(#{node.content}) } + end - def inline_button node - %(#{node.text}) - end + alias convert_video skip - def inline_callout node - %() + def convert_inline_anchor node + case node.type + when :ref + %() + when :xref + if (path = node.attributes['path']) + # QUESTION should we use refid as fallback text instead? (like the html5 backend?) + %(#{node.text || path}) + else + linkend = node.attributes['fragment'] || node.target + (text = node.text) ? %(#{text}) : %() + end + when :link + %(#{node.text}) + when :bibref + %(#{text}) + else + logger.warn %(unknown anchor type: #{node.type.inspect}) + nil end + end - def inline_footnote node - if node.type == :xref - %() - else - %(#{node.text}) - end + def convert_inline_break node + %(#{node.text}) + end + + def convert_inline_button node + %(#{node.text}) + end + + def convert_inline_callout node + %() + end + + def convert_inline_footnote node + if node.type == :xref + %() + else + %(#{node.text}) end + end - def inline_image node - width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : nil - depth_attribute = (node.attr? 'height') ? %( contentdepth="#{node.attr 'height'}") : nil - %( + def convert_inline_image node + width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : '' + depth_attribute = (node.attr? 'height') ? 
%( contentdepth="#{node.attr 'height'}") : '' + %( -#{node.attr 'alt'} +#{node.alt} ) - end + end - def inline_indexterm node - if node.type == :visible - %(#{node.text}#{node.text}) + def convert_inline_indexterm node + if (see = node.attr 'see') + rel = %(\n#{see}) + elsif (see_also_list = node.attr 'see-also') + rel = see_also_list.map {|see_also| %(\n#{see_also}) }.join + else + rel = '' + end + if node.type == :visible + %( +#{node.text}#{rel} +#{node.text}) + else + if (numterms = (terms = node.attr 'terms').size) > 2 + %( +#{terms[0]}#{terms[1]}#{terms[2]}#{rel} +#{(node.document.option? 'indexterm-promotion') ? %[ + +#{terms[1]}#{terms[2]} + + +#{terms[2]} +] : ''}) + elsif numterms > 1 + %( +#{terms[0]}#{terms[1]}#{rel} +#{(node.document.option? 'indexterm-promotion') ? %[ + +#{terms[1]} +] : ''}) else - terms = node.attr 'terms' - result = [] - if (numterms = terms.size) > 2 - result << %( -#{terms[0]}#{terms[1]}#{terms[2]} + %( +#{terms[0]}#{rel} ) - end - if numterms > 1 - result << %( -#{terms[-2]}#{terms[-1]} -) - end - result << %( -#{terms[-1]} -) - result * EOL end end + end - def inline_kbd node - if (keys = node.attr 'keys').size == 1 - %(#{keys[0]}) - else - %(#{keys.map {|key| "#{key}" }.join}) - end + def convert_inline_kbd node + if (keys = node.attr 'keys').size == 1 + %(#{keys[0]}) + else + %(#{keys.join ''}) end + end - def inline_menu node - menu = node.attr 'menu' - if !(submenus = node.attr 'submenus').empty? - submenu_path = submenus.map {|submenu| %(#{submenu} ) }.join.chop - %(#{menu} #{submenu_path} #{node.attr 'menuitem'}) - elsif (menuitem = node.attr 'menuitem') + def convert_inline_menu node + menu = node.attr 'menu' + if (submenus = node.attr 'submenus').empty? + if (menuitem = node.attr 'menuitem') %(#{menu} #{menuitem}) else %(#{menu}) end + else + %(#{menu} #{submenus.join ' '} #{node.attr 'menuitem'}) end + end - (QUOTE_TAGS = { - :emphasis => ['', '', true], - :strong => ['', '', true], - :monospaced => ['', '', false], - :superscript => ['', '', false], - :subscript => ['', '', false], - :double => ['“', '”', true], - :single => ['‘', '’', true], - :mark => ['', '', false] - }).default = [nil, nil, true] - - def inline_quoted node - if (type = node.type) == :asciimath - if ((defined? ::AsciiMath) || ((defined? @asciimath_available) ? @asciimath_available : - (@asciimath_available = Helpers.require_library 'asciimath', true, :warn))) - # NOTE fop requires jeuclid to process raw mathml - %(#{(::AsciiMath.parse node.text).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML'}) + def convert_inline_quoted node + if (type = node.type) == :asciimath + # NOTE fop requires jeuclid to process mathml markup + asciimath_available? ? 
%(#{(::AsciiMath.parse node.text).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML'}) : %() + elsif type == :latexmath + # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math + %() + else + open, close, supports_phrase = QUOTE_TAGS[type] + text = node.text + if node.role + if supports_phrase + quoted_text = %(#{open}#{text}#{close}) else - %() + quoted_text = %(#{open.chop} role="#{node.role}">#{text}#{close}) end - elsif type == :latexmath - # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math - %() else - open, close, supports_phrase = QUOTE_TAGS[type] - text = node.text - if (role = node.role) - if supports_phrase - quoted_text = %(#{open}#{text}#{close}) - else - quoted_text = %(#{open.chop} role="#{role}">#{text}#{close}) - end - else - quoted_text = %(#{open}#{text}#{close}) - end - - node.id ? %(#{quoted_text}) : quoted_text + quoted_text = %(#{open}#{text}#{close}) end - end - def author_element doc, index = nil - firstname_key = index ? %(firstname_#{index}) : 'firstname' - middlename_key = index ? %(middlename_#{index}) : 'middlename' - lastname_key = index ? %(lastname_#{index}) : 'lastname' - email_key = index ? %(email_#{index}) : 'email' + node.id ? %(#{quoted_text}) : quoted_text + end + end - result = [] - result << '' - result << '' - result << %(#{doc.attr firstname_key}) if doc.attr? firstname_key - result << %(#{doc.attr middlename_key}) if doc.attr? middlename_key - result << %(#{doc.attr lastname_key}) if doc.attr? lastname_key - result << '' - result << %(#{doc.attr email_key}) if doc.attr? email_key - result << '' + private - result * EOL + def common_attributes id, role = nil, reftext = nil + if id + attrs = %( xml:id="#{id}"#{role ? %[ role="#{role}"] : ''}) + elsif role + attrs = %( role="#{role}") + else + attrs = '' + end + if reftext + if (reftext.include? '<') && ((reftext = reftext.gsub XmlSanitizeRx, '').include? ' ') + reftext = (reftext.squeeze ' ').strip + end + reftext = reftext.gsub '"', '"' if reftext.include? '"' + %(#{attrs} xreflabel="#{reftext}") + else + attrs end + end - def common_attributes id, role = nil, reftext = nil - res = id ? %( xml:id="#{id}") : '' - res = %(#{res} role="#{role}") if role - res = %(#{res} xreflabel="#{reftext}") if reftext - res - end + def author_tag doc, author + result = [] + result << '' + result << '' + result << %(#{doc.sub_replacements author.firstname}) if author.firstname + result << %(#{doc.sub_replacements author.middlename}) if author.middlename + result << %(#{doc.sub_replacements author.lastname}) if author.lastname + result << '' + result << %(#{author.email}) if author.email + result << '' + result.join LF + end - def doctype_declaration root_tag_name - nil + def document_info_tag doc + result = [''] + unless doc.notitle + if (title = doc.doctitle partition: true, use_fallback: true).subtitle? + result << %(#{title.main} +#{title.subtitle}) + else + result << %(#{title}) + end end - - def document_info_element doc, info_tag_prefix, use_info_tag_prefix = false - info_tag_prefix = '' unless use_info_tag_prefix - result = [] - result << %(<#{info_tag_prefix}info>) - result << document_title_tags(doc.doctitle :partition => true, :use_fallback => true) unless doc.notitle - if (date = (doc.attr? 'revdate') ? (doc.attr 'revdate') : ((doc.attr? 'reproducible') ? nil : (doc.attr 'docdate'))) - result << %(#{date}) - end - if doc.has_header? - if doc.attr? 
'author' - if (authorcount = (doc.attr 'authorcount').to_i) < 2 - result << (author_element doc) - result << %(#{doc.attr 'authorinitials'}) if doc.attr? 'authorinitials' - else - result << '' - authorcount.times do |index| - result << (author_element doc, index + 1) - end - result << '' - end + if (date = (doc.attr? 'revdate') ? (doc.attr 'revdate') : ((doc.attr? 'reproducible') ? nil : (doc.attr 'docdate'))) + result << %(#{date}) + end + if doc.attr? 'copyright' + CopyrightRx =~ (doc.attr 'copyright') + result << '' + result << %(#{$1}) + result << %(#{$2}) if $2 + result << '' + end + if doc.header? + unless (authors = doc.authors).empty? + if authors.size > 1 + result << '' + authors.each {|author| result << (author_tag doc, author) } + result << '' + else + result << (author_tag doc, (author = authors[0])) + result << %(#{author.initials}) if author.initials end - if (doc.attr? 'revdate') && ((doc.attr? 'revnumber') || (doc.attr? 'revremark')) - result << %( + end + if (doc.attr? 'revdate') && ((doc.attr? 'revnumber') || (doc.attr? 'revremark')) + result << %( ) - result << %(#{doc.attr 'revnumber'}) if doc.attr? 'revnumber' - result << %(#{doc.attr 'revdate'}) if doc.attr? 'revdate' - result << %(#{doc.attr 'authorinitials'}) if doc.attr? 'authorinitials' - result << %(#{doc.attr 'revremark'}) if doc.attr? 'revremark' - result << %( + result << %(#{doc.attr 'revnumber'}) if doc.attr? 'revnumber' + result << %(#{doc.attr 'revdate'}) if doc.attr? 'revdate' + result << %(#{doc.attr 'authorinitials'}) if doc.attr? 'authorinitials' + result << %(#{doc.attr 'revremark'}) if doc.attr? 'revremark' + result << %( ) - end - unless (head_docinfo = doc.docinfo).empty? - result << head_docinfo - end - result << %(#{doc.attr 'orgname'}) if doc.attr? 'orgname' end - result << %() - - if doc.doctype == 'manpage' - result << '' - result << %(#{doc.attr 'mantitle'}) if doc.attr? 'mantitle' - result << %(#{doc.attr 'manvolnum'}) if doc.attr? 'manvolnum' - result << '' - result << '' - result << %(#{doc.attr 'manname'}) if doc.attr? 'manname' - result << %(#{doc.attr 'manpurpose'}) if doc.attr? 'manpurpose' - result << '' - end - - result * EOL + if (doc.attr? 'front-cover-image') || (doc.attr? 'back-cover-image') + if (back_cover_tag = cover_tag doc, 'back') + result << (cover_tag doc, 'front', true) + result << back_cover_tag + elsif (front_cover_tag = cover_tag doc, 'front') + result << front_cover_tag + end + end + result << %(#{doc.attr 'orgname'}) if doc.attr? 'orgname' + unless (docinfo_content = doc.docinfo).empty? + result << docinfo_content + end + end + result << '' + + if doc.doctype == 'manpage' + result << '' + result << %(#{doc.attr 'mantitle'}) if doc.attr? 'mantitle' + result << %(#{doc.attr 'manvolnum'}) if doc.attr? 'manvolnum' + result << %(#{doc.attr 'mansource', ' '}) + result << %(#{doc.attr 'manmanual', ' '}) + result << '' + result << '' + result += (doc.attr 'mannames').map {|n| %(#{n}) } if doc.attr? 'mannames' + result << %(#{doc.attr 'manpurpose'}) if doc.attr? 'manpurpose' + result << '' end - def document_ns_attributes doc - ' xmlns="http://docbook.org/ns/docbook" xmlns:xl="http://www.w3.org/1999/xlink" version="5.0"' - end + result.join LF + end - def lang_attribute_name - 'xml:lang' - end + # FIXME this should be handled through a template mechanism + def enclose_content node + node.content_model == :compound ? node.content : %(#{node.content}) + end - def document_title_tags title - if title.subtitle? 
- %(#{title.main} -#{title.subtitle}) - else - %(#{title}) + def title_tag node, optional = true + !optional || node.title? ? %(#{node.title}\n) : '' + end + + def cover_tag doc, face, use_placeholder = false + if (cover_image = doc.attr %(#{face}-cover-image)) + width_attr = '' + depth_attr = '' + if (cover_image.include? ':') && ImageMacroRx =~ cover_image + attrlist = $2 + cover_image = doc.image_uri $1 + if attrlist + attrs = (AttributeList.new attrlist).parse ['alt', 'width', 'height'] + if attrs.key? 'scaledwidth' + # NOTE scalefit="1" is the default in this case + width_attr = %( width="#{attrs['scaledwidth']}") + else + width_attr = %( contentwidth="#{attrs['width']}") if attrs.key? 'width' + depth_attr = %( contentdepth="#{attrs['height']}") if attrs.key? 'height' + end + end end + %( + + + + + +) + elsif use_placeholder + %() end + end - # FIXME this should be handled through a template mechanism - def resolve_content node - node.content_model == :compound ? node.content : %(#{node.content}) - end + def blockquote_tag node, tag_name = nil + if tag_name + start_tag, end_tag = %(<#{tag_name}), %() + else + start_tag, end_tag = '' + end + result = [%(#{start_tag}#{common_attributes node.id, node.role, node.reftext}>)] + result << %(#{node.title}) if node.title? + if (node.attr? 'attribution') || (node.attr? 'citetitle') + result << '' + result << (node.attr 'attribution') if node.attr? 'attribution' + result << %(#{node.attr 'citetitle'}) if node.attr? 'citetitle' + result << '' + end + result << yield + result << end_tag + result.join LF + end - def title_tag node, optional = true - !optional || node.title? ? %(#{node.title}\n) : nil - end + def asciimath_available? + (@asciimath_status ||= load_asciimath) == :loaded end + + def load_asciimath + (defined? ::AsciiMath.parse) ? :loaded : (Helpers.require_library 'asciimath', true, :warn).nil? ? :unavailable : :loaded + end +end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/factory.rb asciidoctor-2.0.10/lib/asciidoctor/converter/factory.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/factory.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/factory.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,227 +0,0 @@ -# encoding: UTF-8 -module Asciidoctor - module Converter - # A factory for instantiating converters that are used to convert a - # {Document} (i.e., a parsed AsciiDoc tree structure) or {AbstractNode} to - # a backend format such as HTML or DocBook. {Factory Converter::Factory} is - # the primary entry point for creating, registering and accessing - # converters. - # - # {Converter} objects are instantiated by passing a String backend name - # and, optionally, an options Hash to the {Factory#create} method. The - # backend can be thought of as an intent to convert a document to a - # specified format. For example: - # - # converter = Asciidoctor::Converter::Factory.create 'html5', :htmlsyntax => 'xml' - # - # Converter objects are thread safe. They only survive the lifetime of a single conversion. - # - # A singleton instance of {Factory Converter::Factory} can be accessed - # using the {Factory.default} method. This instance maintains the global - # registry of statically registered converters. The registery includes - # built-in converters for {Html5Converter HTML 5}, {DocBook5Converter - # DocBook 5} and {DocBook45Converter DocBook 4.5}, as well as any custom - # converters that have been discovered or explicitly registered. 
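# Not part of the patch -- a hedged sketch of how registration changes once this
# factory is removed: in 1.5.x a converter class was handed to Factory.register (or
# resolved through Factory.create as documented above), whereas 2.x converters claim
# their backends themselves via the register_for DSL, exactly as the rewritten
# built-in converters below do. A minimal 2.x-style custom converter:
require 'asciidoctor'
class ShoutConverter < Asciidoctor::Converter::Base
  register_for 'shout'
  def convert node, transform = node.node_name, opts = nil
    # Upcase whatever the node's children produce; enough to show the wiring.
    node.content.to_s.upcase
  end
end
puts Asciidoctor.convert 'hello from a custom backend', backend: 'shout', safe: :safe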
- # - # If the {https://rubygems.org/gems/thread_safe thread_safe} gem is - # installed, access to the default factory is guaranteed to be thread safe. - # Otherwise, a warning is issued to the user. - class Factory - @__default__ = nil - class << self - - # Public: Retrieves a singleton instance of {Factory Converter::Factory}. - # - # If the thread_safe gem is installed, the registry of converters is - # initialized as a ThreadSafe::Cache. Otherwise, a warning is issued and - # the registry of converters is initialized using a normal Hash. - # - # initialize_singleton - A Boolean to indicate whether the singleton should - # be initialize if it has not already been created. - # If false, and a singleton has not been previously - # initialized, a fresh instance is returned. - # - # Returns the default [Factory] singleton instance - def default initialize_singleton = true - return @__default__ || new unless initialize_singleton - # FIXME this assignment is not thread_safe, may need to use a ::Threadsafe helper here - @__default__ ||= begin - require 'thread_safe'.to_s unless defined? ::ThreadSafe - new ::ThreadSafe::Cache.new - rescue ::LoadError - warn 'asciidoctor: WARNING: gem \'thread_safe\' is not installed. This gem is recommended when registering custom converters.' - new - end - end - - # Public: Register a custom converter in the global converter factory to - # handle conversion to the specified backends. If the backend value is an - # asterisk, the converter is used to handle any backend that does not have - # an explicit converter. - # - # converter - The Converter class to register - # backends - A String Array of backend names that this converter should - # be registered to handle (optional, default: ['*']) - # - # Returns nothing - def register converter, backends = ['*'] - default.register converter, backends - end - - # Public: Lookup the custom converter for the specified backend in the - # global factory. - # - # This method does not resolve the built-in converters. - # - # backend - The String backend name - # - # Returns the [Converter] class registered to convert the specified backend - # or nil if no match is found - def resolve backend - default.resolve backend - end - - # Public: Lookup the converter for the specified backend in the global - # factory and instantiate it, forwarding the Hash of options to the - # constructor of the converter class. - # - # If the custom converter is not found, an attempt will be made to find - # and instantiate a built-in converter. - # - # - # backend - The String backend name - # opts - A Hash of options to pass to the converter - # - # Returns an instance of [Converter] for converting the specified backend or - # nil if no match is found. - def create backend, opts = {} - default.create backend, opts - end - - # Public: Retrieve the global Hash of custom Converter classes keyed by backend. - # - # Returns the the global [Hash] of custom Converter classes - def converters - default.converters - end - - # Public: Unregister all Converter classes in the global factory. - # - # Returns nothing - def unregister_all - default.unregister_all - end - end - - # Public: Get the Hash of Converter classes keyed by backend name - attr_reader :converters - - def initialize converters = nil - @converters = converters || {} - @star_converter = nil - end - - # Public: Register a custom converter with this factory to handle conversion - # to the specified backends. 
If the backend value is an asterisk, the - # converter is used to handle any backend that does not have an explicit - # converter. - # - # converter - The Converter class to register - # backends - A String Array of backend names that this converter should - # be registered to handle (optional, default: ['*']) - # - # Returns nothing - def register converter, backends = ['*'] - backends.each do |backend| - @converters[backend] = converter - if backend == '*' - @star_converter = converter - end - end - nil - end - - # Public: Lookup the custom converter registered with this factory to handle - # the specified backend. - # - # backend - The String backend name - # - # Returns the [Converter] class registered to convert the specified backend - # or nil if no match is found - def resolve backend - @converters && (@converters[backend] || @star_converter) - end - - # Public: Unregister all Converter classes that are registered with this - # factory. - # - # Returns nothing - def unregister_all - @converters.clear - @star_converter = nil - end - - # Public: Create a new Converter object that can be used to convert the - # {AbstractNode} (typically a {Document}) to the specified String backend. - # This method accepts an optional Hash of options that are passed to the - # converter's constructor. - # - # If a custom Converter is found to convert the specified backend, it is - # instantiated (if necessary) and returned immediately. If a custom - # Converter is not found, an attempt is made to resolve a built-in - # converter. If the `:template_dirs` key is found in the Hash passed as the - # second argument, a {CompositeConverter} is created that delegates to a - # {TemplateConverter} and, if resolved, the built-in converter. If the - # `:template_dirs` key is not found, the built-in converter is returned - # or nil if no converter is resolved. - # - # backend - the String backend name - # opts - an optional Hash of options that get passed on to the converter's - # constructor. If the :template_dirs key is found in the options - # Hash, this method returns a {CompositeConverter} that delegates - # to a {TemplateConverter}. (optional, default: {}) - # - # Returns the [Converter] object - def create backend, opts = {} - if (converter = resolve backend) - return ::Class === converter ? (converter.new backend, opts) : converter - end - - base_converter = case backend - when 'html5' - unless defined? ::Asciidoctor::Converter::Html5Converter - require 'asciidoctor/converter/html5'.to_s - end - Html5Converter.new backend, opts - when 'docbook5' - unless defined? ::Asciidoctor::Converter::DocBook5Converter - require 'asciidoctor/converter/docbook5'.to_s - end - DocBook5Converter.new backend, opts - when 'docbook45' - unless defined? ::Asciidoctor::Converter::DocBook45Converter - require 'asciidoctor/converter/docbook45'.to_s - end - DocBook45Converter.new backend, opts - when 'manpage' - unless defined? ::Asciidoctor::Converter::ManPageConverter - require 'asciidoctor/converter/manpage'.to_s - end - ManPageConverter.new backend, opts - end - - return base_converter unless opts.key? :template_dirs - - unless defined? ::Asciidoctor::Converter::TemplateConverter - require 'asciidoctor/converter/template'.to_s - end - unless defined? ::Asciidoctor::Converter::CompositeConverter - require 'asciidoctor/converter/composite'.to_s - end - template_converter = TemplateConverter.new backend, opts[:template_dirs], opts - # QUESTION should we omit the composite converter if built_in_converter is nil? 
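# Not part of the patch -- a sketch of the behaviour the comment above refers to:
# when :template_dirs is passed, create wraps the resolved built-in converter in a
# CompositeConverter, so Tilt templates win for any node they cover and everything
# else falls back to the built-in converter. Assuming a hypothetical ./templates
# directory containing templates named after node names (paragraph.html.slim, ...):
require 'asciidoctor'
puts Asciidoctor.convert 'Some *content*.', safe: :safe, template_dirs: ['./templates']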
- CompositeConverter.new backend, template_converter, base_converter - end - end - end -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/html5.rb asciidoctor-2.0.10/lib/asciidoctor/converter/html5.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/html5.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/html5.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,229 +1,265 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor - # A built-in {Converter} implementation that generates HTML 5 output - # consistent with the html5 backend from AsciiDoc Python. - class Converter::Html5Converter < Converter::BuiltIn - (QUOTE_TAGS = { - :emphasis => ['', '', true], - :strong => ['', '', true], - :monospaced => ['', '', true], - :superscript => ['', '', true], - :subscript => ['', '', true], - :double => ['“', '”', false], - :single => ['‘', '’', false], - :mark => ['', '', true], - :asciimath => ['\\$', '\\$', false], - :latexmath => ['\\(', '\\)', false] - # Opal can't resolve these constants when referenced here - #:asciimath => INLINE_MATH_DELIMITERS[:asciimath] + [false], - #:latexmath => INLINE_MATH_DELIMITERS[:latexmath] + [false] - }).default = [nil, nil, nil] - +# A built-in {Converter} implementation that generates HTML 5 output +# consistent with the html5 backend from AsciiDoc Python. +class Converter::Html5Converter < Converter::Base + register_for 'html5' + + (QUOTE_TAGS = { + monospaced: ['', '', true], + emphasis: ['', '', true], + strong: ['', '', true], + double: ['“', '”'], + single: ['‘', '’'], + mark: ['', '', true], + superscript: ['', '', true], + subscript: ['', '', true], + asciimath: ['\$', '\$'], + latexmath: ['\(', '\)'], + # Opal can't resolve these constants when referenced here + #asciimath: INLINE_MATH_DELIMITERS[:asciimath] + [false], + #latexmath: INLINE_MATH_DELIMITERS[:latexmath] + [false], + }).default = ['', ''] + + DropAnchorRx = /<(?:a[^>+]+|\/a)>/ + StemBreakRx = / *\\\n(?:\\?\n)*|\n\n+/ + if RUBY_ENGINE == 'opal' + # NOTE In JavaScript, ^ matches the start of the string when the m flag is not set + SvgPreambleRx = /^#{CC_ALL}*?(?=]*>/ + else SvgPreambleRx = /\A.*?(?=]*>/ - DimensionAttributeRx = /\s(?:width|height|style)=(["']).*?\1/ + end + DimensionAttributeRx = /\s(?:width|height|style)=(["'])#{CC_ANY}*?\1/ - def initialize backend, opts = {} - @xml_mode = opts[:htmlsyntax] == 'xml' - @void_element_slash = @xml_mode ? '/' : nil - @stylesheets = Stylesheets.instance + def initialize backend, opts = {} + @backend = backend + if opts[:htmlsyntax] == 'xml' + syntax = 'xml' + @xml_mode = true + @void_element_slash = '/' + else + syntax = 'html' + @xml_mode = nil + @void_element_slash = '' end + init_backend_traits basebackend: 'html', filetype: 'html', htmlsyntax: syntax, outfilesuffix: '.html', supports_templates: true + end - def document node - result = [] - slash = @void_element_slash - br = %() - unless (asset_uri_scheme = (node.attr 'asset-uri-scheme', 'https')).empty? - asset_uri_scheme = %(#{asset_uri_scheme}:) - end - cdn_base = %(#{asset_uri_scheme}//cdnjs.cloudflare.com/ajax/libs) - linkcss = node.safe >= SafeMode::SECURE || (node.attr? 'linkcss') - result << '' - lang_attribute = (node.attr? 'nolang') ? 
nil : %( lang="#{node.attr 'lang', 'en'}") - result << %() - result << %( + def convert node, transform = node.node_name, opts = nil + if transform == 'inline_quoted'; return convert_inline_quoted node + elsif transform == 'paragraph'; return convert_paragraph node + elsif transform == 'inline_anchor'; return convert_inline_anchor node + elsif transform == 'section'; return convert_section node + elsif transform == 'listing'; return convert_listing node + elsif transform == 'literal'; return convert_literal node + elsif transform == 'ulist'; return convert_ulist node + elsif transform == 'olist'; return convert_olist node + elsif transform == 'dlist'; return convert_dlist node + elsif transform == 'admonition'; return convert_admonition node + elsif transform == 'colist'; return convert_colist node + elsif transform == 'embedded'; return convert_embedded node + elsif transform == 'example'; return convert_example node + elsif transform == 'floating_title'; return convert_floating_title node + elsif transform == 'image'; return convert_image node + elsif transform == 'inline_break'; return convert_inline_break node + elsif transform == 'inline_button'; return convert_inline_button node + elsif transform == 'inline_callout'; return convert_inline_callout node + elsif transform == 'inline_footnote'; return convert_inline_footnote node + elsif transform == 'inline_image'; return convert_inline_image node + elsif transform == 'inline_indexterm'; return convert_inline_indexterm node + elsif transform == 'inline_kbd'; return convert_inline_kbd node + elsif transform == 'inline_menu'; return convert_inline_menu node + elsif transform == 'open'; return convert_open node + elsif transform == 'page_break'; return convert_page_break node + elsif transform == 'preamble'; return convert_preamble node + elsif transform == 'quote'; return convert_quote node + elsif transform == 'sidebar'; return convert_sidebar node + elsif transform == 'stem'; return convert_stem node + elsif transform == 'table'; return convert_table node + elsif transform == 'thematic_break'; return convert_thematic_break node + elsif transform == 'verse'; return convert_verse node + elsif transform == 'video'; return convert_video node + elsif transform == 'document'; return convert_document node + elsif transform == 'toc'; return convert_toc node + elsif transform == 'pass'; return convert_pass node + elsif transform == 'audio'; return convert_audio node + else; return super + end + end + + def convert_document node + br = %() + unless (asset_uri_scheme = (node.attr 'asset-uri-scheme', 'https')).empty? + asset_uri_scheme = %(#{asset_uri_scheme}:) + end + cdn_base_url = %(#{asset_uri_scheme}//cdnjs.cloudflare.com/ajax/libs) + linkcss = node.attr? 'linkcss' + result = [''] + lang_attribute = (node.attr? 'nolang') ? '' : %( lang="#{node.attr 'lang', 'en'}") + result << %() + result << %( - + ) - result << %() if node.attr? 'app-name' - result << %() if node.attr? 'description' - result << %() if node.attr? 'keywords' - result << %() if node.attr? 'authors' - result << %() if node.attr? 'copyright' - result << %(#{node.doctitle :sanitize => true, :use_fallback => true}) - - if DEFAULT_STYLESHEET_KEYS.include?(node.attr 'stylesheet') - if (webfonts = node.attr 'webfonts') - result << %() - end - if linkcss - result << %() - else - result << @stylesheets.embed_primary_stylesheet - end - elsif node.attr? 'stylesheet' - if linkcss - result << %() - else - result << %() - end end - - if node.attr? 'icons', 'font' - if node.attr? 
'iconfont-remote' - result << %() - else - iconfont_stylesheet = %(#{node.attr 'iconfont-name', 'font-awesome'}.css) - result << %() - end + elsif node.attr? 'stylesheet' + if linkcss + result << %() + else + result << %() end + end - case (highlighter = node.attr 'source-highlighter') - when 'coderay' - if (node.attr 'coderay-css', 'class') == 'class' - if linkcss - result << %() - else - result << @stylesheets.embed_coderay_stylesheet - end - end - when 'pygments' - if (node.attr 'pygments-css', 'class') == 'class' - pygments_style = node.attr 'pygments-style' - if linkcss - result << %() - else - result << (@stylesheets.embed_pygments_stylesheet pygments_style) - end - end + if node.attr? 'icons', 'font' + if node.attr? 'iconfont-remote' + result << %() + else + iconfont_stylesheet = %(#{node.attr 'iconfont-name', 'font-awesome'}.css) + result << %() end + end - unless (docinfo_content = node.docinfo).empty? - result << docinfo_content - end + if (syntax_hl = node.syntax_highlighter) && (syntax_hl.docinfo? :head) + result << (syntax_hl.docinfo :head, node, cdn_base_url: cdn_base_url, linkcss: linkcss, self_closing_tag_slash: slash) + end - result << '' - body_attrs = [] - body_attrs << %(id="#{node.id}") if node.id - if (sectioned = node.sections?) && (node.attr? 'toc-class') && (node.attr? 'toc') && (node.attr? 'toc-placement', 'auto') - body_attrs << %(class="#{node.doctype} #{node.attr 'toc-class'} toc-#{node.attr 'toc-position', 'header'}") - else - body_attrs << %(class="#{node.doctype}") - end - body_attrs << %(style="max-width: #{node.attr 'max-width'};") if node.attr? 'max-width' - result << %() - - unless node.noheader - result << '' + result.join LF + end - %( + def convert_example node + id_attribute = node.id ? %( id="#{node.id}") : '' + if node.option? 'collapsible' + class_attribute = node.role ? %( class="#{node.role}") : '' + summary_element = node.title? ? %(#{node.title}) : 'Details' + %( +#{summary_element} +
+#{node.content} +
+) + else + title_element = node.title? ? %(
<div class="title">#{node.captioned_title}</div>
\n) : '' + %( #{title_element}
#{node.content}
) end + end - def floating_title node - tag_name = %(h#{node.level + 1}) - id_attribute = node.id ? %( id="#{node.id}") : nil - classes = [node.style, node.role].compact - %(<#{tag_name}#{id_attribute} class="#{classes * ' '}">#{node.title}) - end + def convert_floating_title node + tag_name = %(h#{node.level + 1}) + id_attribute = node.id ? %( id="#{node.id}") : '' + classes = [node.style, node.role].compact + %(<#{tag_name}#{id_attribute} class="#{classes.join ' '}">#{node.title}) + end - def image node - target = node.attr 'target' - width_attr = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : nil - height_attr = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : nil - if ((node.attr? 'format', 'svg', false) || (target.include? '.svg')) && node.document.safe < SafeMode::SECURE && - ((svg = (node.option? 'inline')) || (obj = (node.option? 'interactive'))) - if svg - img = (read_svg_contents node, target) || %(#{node.attr 'alt'}) - elsif obj - fallback = (node.attr? 'fallback') ? %(#{node.attr 'alt'}) : %(#{node.attr 'alt'}) - img = %(#{fallback}) - end - end - img ||= %(#{node.attr 'alt'}) - if (link = node.attr 'link') - img = %(#{img}) - end - id_attr = node.id ? %( id="#{node.id}") : nil - classes = ['imageblock', node.role].compact - class_attr = %( class="#{classes * ' '}") - styles = [] - styles << %(text-align: #{node.attr 'align'}) if node.attr? 'align' - styles << %(float: #{node.attr 'float'}) if node.attr? 'float' - style_attr = styles.empty? ? nil : %( style="#{styles * ';'}") - title_el = node.title? ? %(\n
<div class="title">#{node.captioned_title}</div>
) : nil - %( + def convert_image node + target = node.attr 'target' + width_attr = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' + height_attr = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : '' + if ((node.attr? 'format', 'svg') || (target.include? '.svg')) && node.document.safe < SafeMode::SECURE && + ((svg = (node.option? 'inline')) || (obj = (node.option? 'interactive'))) + if svg + img = (read_svg_contents node, target) || %(#{node.alt}) + elsif obj + fallback = (node.attr? 'fallback') ? %(#{encode_attribute_value node.alt}) : %(#{node.alt}) + img = %(#{fallback}) + end + end + img ||= %(#{encode_attribute_value node.alt}) + if node.attr? 'link' + img = %(#{img}) + end + id_attr = node.id ? %( id="#{node.id}") : '' + classes = ['imageblock'] + classes << (node.attr 'float') if node.attr? 'float' + classes << %(text-#{node.attr 'align'}) if node.attr? 'align' + classes << node.role if node.role + class_attr = %( class="#{classes.join ' '}") + title_el = node.title? ? %(\n
<div class="title">#{node.captioned_title}</div>
) : '' + %(
#{img}
#{title_el} ) - end - - def listing node - nowrap = !(node.document.attr? 'prewrap') || (node.option? 'nowrap') - if node.style == 'source' - if (language = node.attr 'language', nil, false) - code_attrs = %( data-lang="#{language}") - else - code_attrs = nil - end - case node.document.attr 'source-highlighter' - when 'coderay' - pre_class = %( class="CodeRay highlight#{nowrap ? ' nowrap' : nil}") - when 'pygments' - pre_class = %( class="pygments highlight#{nowrap ? ' nowrap' : nil}") - when 'highlightjs', 'highlight.js' - pre_class = %( class="highlightjs highlight#{nowrap ? ' nowrap' : nil}") - code_attrs = %( class="language-#{language}"#{code_attrs}) if language - when 'prettify' - pre_class = %( class="prettyprint highlight#{nowrap ? ' nowrap' : nil}#{(node.attr? 'linenums') ? ' linenums' : nil}") - code_attrs = %( class="language-#{language}"#{code_attrs}) if language - when 'html-pipeline' - pre_class = language ? %( lang="#{language}") : nil - code_attrs = nil - else - pre_class = %( class="highlight#{nowrap ? ' nowrap' : nil}") - code_attrs = %( class="language-#{language}"#{code_attrs}) if language - end - pre_start = %() - pre_end = '
' - else - pre_start = %() - pre_end = '' - end + end - id_attribute = node.id ? %( id="#{node.id}") : nil - title_element = node.title? ? %(
<div class="title">#{node.captioned_title}</div>
\n) : nil - %( + def convert_listing node + nowrap = (node.option? 'nowrap') || !(node.document.attr? 'prewrap') + if node.style == 'source' + lang = node.attr 'language' + if (syntax_hl = node.document.syntax_highlighter) + opts = syntax_hl.highlight? ? { + css_mode: ((doc_attrs = node.document.attributes)[%(#{syntax_hl.name}-css)] || :class).to_sym, + style: doc_attrs[%(#{syntax_hl.name}-style)], + } : {} + opts[:nowrap] = nowrap + else + pre_open = %(
)
+        pre_close = '
' + end + else + pre_open = %() + pre_close = '' + end + id_attribute = node.id ? %( id="#{node.id}") : '' + title_element = node.title? ? %(
<div class="title">#{node.captioned_title}</div>
\n) : '' + %( #{title_element}
-#{pre_start}#{node.content}#{pre_end} +#{syntax_hl ? (syntax_hl.format node, lang, opts) : pre_open + (node.content || '') + pre_close}
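# Not part of the patch -- a sketch of what the 2.x line above delegates to: when a
# source-highlighter is active, node.document.syntax_highlighter.format builds the
# pre/code markup (and the highlighted tokens) in place of the plain wrappers.
# Assuming the rouge gem is installed:
require 'asciidoctor'
listing = <<~'ADOC'
  [source,ruby]
  ----
  puts 'hello'
  ----
ADOC
puts Asciidoctor.convert listing, safe: :safe, attributes: { 'source-highlighter' => 'rouge' }
# The listing body comes back roughly as <pre class="rouge highlight"><code data-lang="ruby">...</code></pre>.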
) - end + end - def literal node - id_attribute = node.id ? %( id="#{node.id}") : nil - title_element = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : nil - nowrap = !(node.document.attr? 'prewrap') || (node.option? 'nowrap') - %( + def convert_literal node + id_attribute = node.id ? %( id="#{node.id}") : '' + title_element = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : '' + nowrap = !(node.document.attr? 'prewrap') || (node.option? 'nowrap') + %( #{title_element}
-#{node.content} +#{node.content}
) - end - - def stem node - id_attribute = node.id ? %( id="#{node.id}") : nil - title_element = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : nil - open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] + end - unless ((equation = node.content).start_with? open) && (equation.end_with? close) + def convert_stem node + id_attribute = node.id ? %( id="#{node.id}") : '' + title_element = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : '' + open, close = BLOCK_MATH_DELIMITERS[style = node.style.to_sym] + if (equation = node.content) + if style == :asciimath && (equation.include? LF) + br = %(#{LF}) + equation = equation.gsub(StemBreakRx) { %(#{close}#{br * ($&.count LF)}#{open}) } + end + unless (equation.start_with? open) && (equation.end_with? close) equation = %(#{open}#{equation}#{close}) end - - %( + else + equation = '' + end + %( #{title_element}
#{equation}
) - end - - def olist node - result = [] - id_attribute = node.id ? %( id="#{node.id}") : nil - classes = ['olist', node.style, node.role].compact - class_attribute = %( class="#{classes * ' '}") - - result << %() - result << %(
<div class="title">#{node.title}</div>
) if node.title? - - type_attribute = (keyword = node.list_marker_keyword) ? %( type="#{keyword}") : nil - start_attribute = (node.attr? 'start') ? %( start="#{node.attr 'start'}") : nil - reversed_attribute = (node.option? 'reversed') ? (append_boolean_attribute 'reversed', @xml_mode) : nil - result << %(
    ) + end - node.items.each do |item| + def convert_olist node + result = [] + id_attribute = node.id ? %( id="#{node.id}") : '' + classes = ['olist', node.style, node.role].compact + class_attribute = %( class="#{classes.join ' '}") + + result << %() + result << %(
<div class="title">#{node.title}</div>
    ) if node.title? + + type_attribute = (keyword = node.list_marker_keyword) ? %( type="#{keyword}") : '' + start_attribute = (node.attr? 'start') ? %( start="#{node.attr 'start'}") : '' + reversed_attribute = (node.option? 'reversed') ? (append_boolean_attribute 'reversed', @xml_mode) : '' + result << %(
      ) + + node.items.each do |item| + if item.id + result << %(
    1. ) + elsif item.role + result << %(
    2. ) + else result << '
    3. ' - result << %(

<p>#{item.text}</p>

      ) - result << item.content if item.blocks? - result << '
    4. ' end - - result << '
    ' - result << '' - result * EOL + result << %(

<p>#{item.text}</p>

    ) + result << item.content if item.blocks? + result << '' end - def open node - if (style = node.style) == 'abstract' - if node.parent == node.document && node.document.doctype == 'book' - warn 'asciidoctor: WARNING: abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' - '' - else - id_attr = node.id ? %( id="#{node.id}") : nil - title_el = node.title? ? %(
<div class="title">#{node.title}</div>
    \n) : nil - %( + result << '
' + result << '' + result.join LF + end + + def convert_open node + if (style = node.style) == 'abstract' + if node.parent == node.document && node.document.doctype == 'book' + logger.warn 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' + '' + else + id_attr = node.id ? %( id="#{node.id}") : '' + title_el = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : '' + %( #{title_el}
#{node.content}
) - end - elsif style == 'partintro' && (node.level > 0 || node.parent.context != :section || node.document.doctype != 'book') - warn 'asciidoctor: ERROR: partintro block can only be used when doctype is book and it\'s a child of a book part. Excluding block content.' - '' - else - id_attr = node.id ? %( id="#{node.id}") : nil - title_el = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : nil - %( + end + elsif style == 'partintro' && (node.level > 0 || node.parent.context != :section || node.document.doctype != 'book') + logger.error 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' + '' + else + id_attr = node.id ? %( id="#{node.id}") : '' + title_el = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : '' + %( #{title_el}
#{node.content}
) - end - end - - def page_break node - '
' end + end - def paragraph node - class_attribute = node.role ? %(class="paragraph #{node.role}") : 'class="paragraph"' - attributes = node.id ? %(id="#{node.id}" #{class_attribute}) : class_attribute + def convert_page_break node + '
' + end - if node.title? - %(
+ def convert_paragraph node + if node.role + attributes = %(#{node.id ? %[ id="#{node.id}"] : ''} class="paragraph #{node.role}") + elsif node.id + attributes = %( id="#{node.id}" class="paragraph") + else + attributes = ' class="paragraph"' + end + if node.title? + %(
<div class="title">#{node.title}</div>

<p>#{node.content}</p>

) - else - %(
+ else + %(

<p>#{node.content}</p>

) - end end + end + + alias convert_pass content_only - def preamble node - if (doc = node.document).attr?('toc-placement', 'preamble') && doc.sections? && (doc.attr? 'toc') - toc = %( + def convert_preamble node + if (doc = node.document).attr?('toc-placement', 'preamble') && doc.sections? && (doc.attr? 'toc') + toc = %(
<div id="toctitle">#{doc.attr 'toc-title'}</div>
-#{outline doc} +#{convert_outline doc}
) - else - toc = nil - end + else + toc = '' + end - %(
+ %(
#{node.content}
#{toc}
) - end + end - def quote node - id_attribute = node.id ? %( id="#{node.id}") : nil - classes = ['quoteblock', node.role].compact - class_attribute = %( class="#{classes * ' '}") - title_element = node.title? ? %(\n
<div class="title">#{node.title}</div>
) : nil - attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil - citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil - if attribution || citetitle - cite_element = citetitle ? %(#{citetitle}) : nil - attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : nil}) : nil - attribution_element = %(\n
<div class="attribution">\n#{attribution_text}#{cite_element}\n</div>
) - else - attribution_element = nil - end + def convert_quote node + id_attribute = node.id ? %( id="#{node.id}") : '' + classes = ['quoteblock', node.role].compact + class_attribute = %( class="#{classes.join ' '}") + title_element = node.title? ? %(\n
<div class="title">#{node.title}</div>
) : '' + attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil + citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil + if attribution || citetitle + cite_element = citetitle ? %(#{citetitle}) : '' + attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : ''}) : '' + attribution_element = %(\n
<div class="attribution">\n#{attribution_text}#{cite_element}\n</div>
) + else + attribution_element = '' + end - %(#{title_element} + %(#{title_element}
#{node.content}
#{attribution_element}
) - end + end - def thematic_break node - %() - end + def convert_thematic_break node + %() + end - def sidebar node - id_attribute = node.id ? %( id="#{node.id}") : nil - title_element = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : nil - %( + def convert_sidebar node + id_attribute = node.id ? %( id="#{node.id}") : '' + title_element = node.title? ? %(
<div class="title">#{node.title}</div>
\n) : '' + %(
#{title_element}#{node.content}
) - end + end - def table node - result = [] - id_attribute = node.id ? %( id="#{node.id}") : nil - classes = ['tableblock', %(frame-#{node.attr 'frame', 'all'}), %(grid-#{node.attr 'grid', 'all'})] - styles = [] - unless (node.option? 'autowidth') && !(node.attr? 'width', nil, false) - if node.attr? 'tablepcwidth', 100 - classes << 'spread' - else - styles << %(width: #{node.attr 'tablepcwidth'}%;) + def convert_table node + result = [] + id_attribute = node.id ? %( id="#{node.id}") : '' + classes = ['tableblock', %(frame-#{node.attr 'frame', 'all', 'table-frame'}), %(grid-#{node.attr 'grid', 'all', 'table-grid'})] + if (stripes = node.attr 'stripes', nil, 'table-stripes') + classes << %(stripes-#{stripes}) + end + styles = [] + if (autowidth = node.option? 'autowidth') && !(node.attr? 'width') + classes << 'fit-content' + elsif (tablewidth = node.attr 'tablepcwidth') == 100 + classes << 'stretch' + else + styles << %(width: #{tablewidth}%;) + end + classes << (node.attr 'float') if node.attr? 'float' + if (role = node.role) + classes << role + end + class_attribute = %( class="#{classes.join ' '}") + style_attribute = styles.empty? ? '' : %( style="#{styles.join ' '}") + + result << %() + result << %(#{node.captioned_title}) if node.title? + if (node.attr 'rowcount') > 0 + slash = @void_element_slash + result << '' + if autowidth + result += (Array.new node.columns.size, %()) + else + node.columns.each do |col| + result << ((col.option? 'autowidth') ? %() : %()) end end - if (role = node.role) - classes << role - end - class_attribute = %( class="#{classes * ' '}") - styles << %(float: #{node.attr 'float'};) if node.attr? 'float' - style_attribute = styles.empty? ? nil : %( style="#{styles * ' '}") - - result << %() - result << %(#{node.captioned_title}) if node.title? - if (node.attr 'rowcount') > 0 - slash = @void_element_slash - result << '' - if node.option? 'autowidth' - tag = %() - node.columns.size.times do - result << tag - end - else - node.columns.each do |col| - result << %() - end - end - result << '' - [:head, :foot, :body].select {|tsec| !node.rows[tsec].empty? }.each do |tsec| - result << %() - node.rows[tsec].each do |row| - result << '' - row.each do |cell| - if tsec == :head - cell_content = cell.text + result << '' + node.rows.to_h.each do |tsec, rows| + next if rows.empty? + result << %() + rows.each do |row| + result << '' + row.each do |cell| + if tsec == :head + cell_content = cell.text + else + case cell.style + when :asciidoc + cell_content = %(
#{cell.content}
) + when :literal + cell_content = %(
#{cell.text}
) else - case cell.style - when :asciidoc - cell_content = %(
#{cell.content}
) - when :verse - cell_content = %(
#{cell.text}
) - when :literal - cell_content = %(
#{cell.text}
) - else - cell_content = '' - cell.content.each do |text| - cell_content = %(#{cell_content}

#{text}

) - end - end + cell_content = (cell_content = cell.content).empty? ? '' : %(

#{cell_content.join '

+

'}

) end - - cell_tag_name = (tsec == :head || cell.style == :header ? 'th' : 'td') - cell_class_attribute = %( class="tableblock halign-#{cell.attr 'halign'} valign-#{cell.attr 'valign'}") - cell_colspan_attribute = cell.colspan ? %( colspan="#{cell.colspan}") : nil - cell_rowspan_attribute = cell.rowspan ? %( rowspan="#{cell.rowspan}") : nil - cell_style_attribute = (node.document.attr? 'cellbgcolor') ? %( style="background-color: #{node.document.attr 'cellbgcolor'};") : nil - result << %(<#{cell_tag_name}#{cell_class_attribute}#{cell_colspan_attribute}#{cell_rowspan_attribute}#{cell_style_attribute}>#{cell_content}) end - result << '' + + cell_tag_name = (tsec == :head || cell.style == :header ? 'th' : 'td') + cell_class_attribute = %( class="tableblock halign-#{cell.attr 'halign'} valign-#{cell.attr 'valign'}") + cell_colspan_attribute = cell.colspan ? %( colspan="#{cell.colspan}") : '' + cell_rowspan_attribute = cell.rowspan ? %( rowspan="#{cell.rowspan}") : '' + cell_style_attribute = (node.document.attr? 'cellbgcolor') ? %( style="background-color: #{node.document.attr 'cellbgcolor'};") : '' + result << %(<#{cell_tag_name}#{cell_class_attribute}#{cell_colspan_attribute}#{cell_rowspan_attribute}#{cell_style_attribute}>#{cell_content}) end - result << %(
) + result << '' end + result << %(
) end - result << '' - result * EOL end + result << '' + result.join LF + end - def toc node - unless (doc = node.document).attr?('toc-placement', 'macro') && doc.sections? && (doc.attr? 'toc') - return '' - end - - if node.id - id_attr = %( id="#{node.id}") - title_id_attr = %( id="#{node.id}title") - else - id_attr = ' id="toc"' - title_id_attr = ' id="toctitle"' - end - title = node.title? ? node.title : (doc.attr 'toc-title') - levels = (node.attr? 'levels') ? (node.attr 'levels').to_i : nil - role = node.role? ? node.role : (doc.attr 'toc-class', 'toc') + def convert_toc node + unless (doc = node.document).attr?('toc-placement', 'macro') && doc.sections? && (doc.attr? 'toc') + return '' + end + + if node.id + id_attr = %( id="#{node.id}") + title_id_attr = %( id="#{node.id}title") + else + id_attr = ' id="toc"' + title_id_attr = ' id="toctitle"' + end + title = node.title? ? node.title : (doc.attr 'toc-title') + levels = (node.attr? 'levels') ? (node.attr 'levels').to_i : nil + role = node.role? ? node.role : (doc.attr 'toc-class', 'toc') - %( + %( #{title} -#{outline doc, :toclevels => levels} +#{convert_outline doc, toclevels: levels} ) - end + end - def ulist node - result = [] - id_attribute = node.id ? %( id="#{node.id}") : nil - div_classes = ['ulist', node.style, node.role].compact - marker_checked = nil - marker_unchecked = nil - if (checklist = node.option? 'checklist') - div_classes.insert 1, 'checklist' - ul_class_attribute = ' class="checklist"' - if node.option? 'interactive' - if @xml_mode - marker_checked = ' ' - marker_unchecked = ' ' - else - marker_checked = ' ' - marker_unchecked = ' ' - end + def convert_ulist node + result = [] + id_attribute = node.id ? %( id="#{node.id}") : '' + div_classes = ['ulist', node.style, node.role].compact + marker_checked = marker_unchecked = '' + if (checklist = node.option? 'checklist') + div_classes.unshift div_classes.shift, 'checklist' + ul_class_attribute = ' class="checklist"' + if node.option? 'interactive' + if @xml_mode + marker_checked = ' ' + marker_unchecked = ' ' else - if node.document.attr? 'icons', 'font' - marker_checked = ' ' - marker_unchecked = ' ' - else - marker_checked = '✓ ' - marker_unchecked = '❏ ' - end + marker_checked = ' ' + marker_unchecked = ' ' end + elsif node.document.attr? 'icons', 'font' + marker_checked = ' ' + marker_unchecked = ' ' + else + marker_checked = '✓ ' + marker_unchecked = '❏ ' + end + else + ul_class_attribute = node.style ? %( class="#{node.style}") : '' + end + result << %() + result << %(
<div class="title">#{node.title}</div>
) if node.title? + result << %() + + node.items.each do |item| + if item.id + result << %(
  • ) + elsif item.role + result << %(
  • ) else - ul_class_attribute = node.style ? %( class="#{node.style}") : nil - end - result << %() - result << %(
<div class="title">#{node.title}</div>
    ) if node.title? - result << %() - - node.items.each do |item| result << '
  • ' - if checklist && (item.attr? 'checkbox') - result << %(

    #{(item.attr? 'checked') ? marker_checked : marker_unchecked}#{item.text}

    ) - else - result << %(

<p>#{item.text}</p>

    ) - end - result << item.content if item.blocks? - result << '
  • ' end - - result << '' - result << '' - result * EOL - end - - def verse node - id_attribute = node.id ? %( id="#{node.id}") : nil - classes = ['verseblock', node.role].compact - class_attribute = %( class="#{classes * ' '}") - title_element = node.title? ? %(\n
<div class="title">#{node.title}</div>
    ) : nil - attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil - citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil - if attribution || citetitle - cite_element = citetitle ? %(#{citetitle}) : nil - attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : nil}) : nil - attribution_element = %(\n
<div class="attribution">\n#{attribution_text}#{cite_element}\n</div>
    ) + if checklist && (item.attr? 'checkbox') + result << %(

    #{(item.attr? 'checked') ? marker_checked : marker_unchecked}#{item.text}

    ) else - attribution_element = nil + result << %(

<p>#{item.text}</p>

    ) end + result << item.content if item.blocks? + result << '' + end - %(#{title_element} + result << '' + result << '' + result.join LF + end + + def convert_verse node + id_attribute = node.id ? %( id="#{node.id}") : '' + classes = ['verseblock', node.role].compact + class_attribute = %( class="#{classes.join ' '}") + title_element = node.title? ? %(\n
<div class="title">#{node.title}</div>
    ) : '' + attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil + citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil + if attribution || citetitle + cite_element = citetitle ? %(#{citetitle}) : '' + attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : ''}) : '' + attribution_element = %(\n
<div class="attribution">\n#{attribution_text}#{cite_element}\n</div>
    ) + else + attribution_element = '' + end + + %(#{title_element}
<pre class="content">#{node.content}</pre>
    #{attribution_element} ) - end + end - def video node - xml = @xml_mode - id_attribute = node.id ? %( id="#{node.id}") : nil - classes = ['videoblock', node.role].compact - class_attribute = %( class="#{classes * ' '}") - title_element = node.title? ? %(\n
<div class="title">#{node.captioned_title}</div>
    ) : nil - width_attribute = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : nil - height_attribute = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : nil - case node.attr 'poster' - when 'vimeo' - unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? - asset_uri_scheme = %(#{asset_uri_scheme}:) - end - start_anchor = (node.attr? 'start', nil, false) ? %(#at=#{node.attr 'start'}) : nil - delimiter = '?' - autoplay_param = (node.option? 'autoplay') ? %(#{delimiter}autoplay=1) : nil - delimiter = '&' if autoplay_param - loop_param = (node.option? 'loop') ? %(#{delimiter}loop=1) : nil - %(#{title_element} + def convert_video node + xml = @xml_mode + id_attribute = node.id ? %( id="#{node.id}") : '' + classes = ['videoblock'] + classes << (node.attr 'float') if node.attr? 'float' + classes << %(text-#{node.attr 'align'}) if node.attr? 'align' + classes << node.role if node.role + class_attribute = %( class="#{classes.join ' '}") + title_element = node.title? ? %(\n
<div class="title">#{node.title}</div>
    ) : '' + width_attribute = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' + height_attribute = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : '' + case node.attr 'poster' + when 'vimeo' + unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? + asset_uri_scheme = %(#{asset_uri_scheme}:) + end + start_anchor = (node.attr? 'start') ? %(#at=#{node.attr 'start'}) : '' + delimiter = ['?'] + autoplay_param = (node.option? 'autoplay') ? %(#{delimiter.pop || '&'}autoplay=1) : '' + loop_param = (node.option? 'loop') ? %(#{delimiter.pop || '&'}loop=1) : '' + muted_param = (node.option? 'muted') ? %(#{delimiter.pop || '&'}muted=1) : '' + %(#{title_element}
    - +
    ) - when 'youtube' - unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? - asset_uri_scheme = %(#{asset_uri_scheme}:) - end - rel_param_val = (node.option? 'related') ? 1 : 0 - # NOTE start and end must be seconds (t parameter allows XmYs where X is minutes and Y is seconds) - start_param = (node.attr? 'start', nil, false) ? %(&start=#{node.attr 'start'}) : nil - end_param = (node.attr? 'end', nil, false) ? %(&end=#{node.attr 'end'}) : nil - autoplay_param = (node.option? 'autoplay') ? '&autoplay=1' : nil - loop_param = (node.option? 'loop') ? '&loop=1' : nil - controls_param = (node.option? 'nocontrols') ? '&controls=0' : nil - # cover both ways of controlling fullscreen option - if node.option? 'nofullscreen' - fs_param = '&fs=0' - fs_attribute = nil - else - fs_param = nil - fs_attribute = append_boolean_attribute 'allowfullscreen', xml - end - modest_param = (node.option? 'modest') ? '&modestbranding=1' : nil - theme_param = (node.attr? 'theme', nil, false) ? %(&theme=#{node.attr 'theme'}) : nil - hl_param = (node.attr? 'lang') ? %(&hl=#{node.attr 'lang'}) : nil - - # parse video_id/list_id syntax where list_id (i.e., playlist) is optional - target, list = (node.attr 'target').split '/', 2 - if (list ||= (node.attr 'list', nil, false)) - list_param = %(&list=#{list}) + when 'youtube' + unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? + asset_uri_scheme = %(#{asset_uri_scheme}:) + end + rel_param_val = (node.option? 'related') ? 1 : 0 + # NOTE start and end must be seconds (t parameter allows XmYs where X is minutes and Y is seconds) + start_param = (node.attr? 'start') ? %(&start=#{node.attr 'start'}) : '' + end_param = (node.attr? 'end') ? %(&end=#{node.attr 'end'}) : '' + autoplay_param = (node.option? 'autoplay') ? '&autoplay=1' : '' + loop_param = (has_loop_param = node.option? 'loop') ? '&loop=1' : '' + mute_param = (node.option? 'muted') ? '&mute=1' : '' + controls_param = (node.option? 'nocontrols') ? '&controls=0' : '' + # cover both ways of controlling fullscreen option + if node.option? 'nofullscreen' + fs_param = '&fs=0' + fs_attribute = '' + else + fs_param = '' + fs_attribute = append_boolean_attribute 'allowfullscreen', xml + end + modest_param = (node.option? 'modest') ? '&modestbranding=1' : '' + theme_param = (node.attr? 'theme') ? %(&theme=#{node.attr 'theme'}) : '' + hl_param = (node.attr? 'lang') ? %(&hl=#{node.attr 'lang'}) : '' + + # parse video_id/list_id syntax where list_id (i.e., playlist) is optional + target, list = (node.attr 'target').split '/', 2 + if (list ||= (node.attr 'list')) + list_param = %(&list=#{list}) + else + # parse dynamic playlist syntax: video_id1,video_id2,... + target, playlist = target.split ',', 2 + if (playlist ||= (node.attr 'playlist')) + # INFO playlist bar doesn't appear in Firefox unless showinfo=1 and modestbranding=1 + list_param = %(&playlist=#{playlist}) else - # parse dynamic playlist syntax: video_id1,video_id2,... - target, playlist = target.split ',', 2 - if (playlist ||= (node.attr 'playlist', nil, false)) - # INFO playlist bar doesn't appear in Firefox unless showinfo=1 and modestbranding=1 - list_param = %(&playlist=#{playlist}) - else - # NOTE for loop to work, playlist must be specified; use VIDEO_ID if there's no explicit playlist - list_param = loop_param ? %(&playlist=#{target}) : nil - end + # NOTE for loop to work, playlist must be specified; use VIDEO_ID if there's no explicit playlist + list_param = has_loop_param ? 
%(&playlist=#{target}) : '' end + end - %(#{title_element} + %(#{title_element}
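The two split calls above accept either a VIDEO_ID/PLAYLIST_ID target or a comma-separated list of video IDs. Outside of the converter, the decomposition works like this (the IDs are placeholders):

    video_id, list_id = 'VIDEO_ID/PLAYLIST_ID'.split '/', 2
    # video_id => "VIDEO_ID", list_id => "PLAYLIST_ID"

    video_id, playlist = 'VIDEO_ID1,VIDEO_ID2,VIDEO_ID3'.split ',', 2
    # video_id => "VIDEO_ID1", playlist => "VIDEO_ID2,VIDEO_ID3"
    # when the loop option is set without an explicit playlist, the video's own
    # ID is reused as the playlist value so that looping works (see NOTE above)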
    - +
    ) - else - poster_attribute = %(#{poster = node.attr 'poster'}).empty? ? nil : %( poster="#{node.media_uri poster}") - start_t = node.attr 'start', nil, false - end_t = node.attr 'end', nil, false - time_anchor = (start_t || end_t) ? %(#t=#{start_t}#{end_t ? ',' : nil}#{end_t}) : nil - %(#{title_element} + else + poster_attribute = (val = node.attr 'poster').nil_or_empty? ? '' : %( poster="#{node.media_uri val}") + preload_attribute = (val = node.attr 'preload').nil_or_empty? ? '' : %( preload="#{val}") + start_t = node.attr 'start' + end_t = node.attr 'end' + time_anchor = (start_t || end_t) ? %(#t=#{start_t || ''}#{end_t ? ",#{end_t}" : ''}) : '' + %(#{title_element}
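In the local-media branch above, start and end times are carried in a media-fragment anchor rather than query parameters. The expression that builds time_anchor behaves like this small sketch (the times are illustrative):

    def time_anchor start_t, end_t
      (start_t || end_t) ? %(#t=#{start_t || ''}#{end_t ? ",#{end_t}" : ''}) : ''
    end

    time_anchor 60, 90   #=> "#t=60,90"
    time_anchor 60, nil  #=> "#t=60"
    time_anchor nil, 90  #=> "#t=,90"
    time_anchor nil, nil #=> ""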
    -
    ) - end end + end - def inline_anchor node - target = node.target - case node.type - when :xref - refid = node.attributes['refid'] || target - # NOTE we lookup text in converter because DocBook doesn't need this logic - text = node.text || (node.document.references[:ids][refid] || %([#{refid}])) - # FIXME shouldn't target be refid? logic seems confused here - %(#{text}) - when :ref - %() - when :link - attrs = [] - attrs << %( id="#{node.id}") if node.id - if (role = node.role) - attrs << %( class="#{role}") + def convert_inline_anchor node + case node.type + when :xref + if (path = node.attributes['path']) + attrs = (append_link_constraint_attrs node, node.role ? [%( class="#{node.role}")] : []).join + text = node.text || path + else + attrs = node.role ? %( class="#{node.role}") : '' + unless (text = node.text) + refid = node.attributes['refid'] + if AbstractNode === (ref = (@refs ||= node.document.catalog[:refs])[refid]) + text = (ref.xreftext node.attr('xrefstyle', nil, true)) || %([#{refid}]) + else + text = %([#{refid}]) + end end - attrs << %( title="#{node.attr 'title'}") if node.attr? 'title', nil, false - attrs << %( target="#{node.attr 'window'}") if node.attr? 'window', nil, false - %(#{node.text}) - when :bibref - %([#{target}]) - else - warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) end + %(#{text}) + when :ref + %() + when :link + attrs = node.id ? [%( id="#{node.id}")] : [] + attrs << %( class="#{node.role}") if node.role + attrs << %( title="#{node.attr 'title'}") if node.attr? 'title' + %(#{node.text}) + when :bibref + %([#{node.reftext || node.id}]) + else + logger.warn %(unknown anchor type: #{node.type.inspect}) + nil end + end - def inline_break node - %(#{node.text}) - end + def convert_inline_break node + %(#{node.text}) + end + + def convert_inline_button node + %(#{node.text}) + end - def inline_button node - %(#{node.text}) + def convert_inline_callout node + if node.document.attr? 'icons', 'font' + %((#{node.text})) + elsif node.document.attr? 'icons' + src = node.icon_uri("callouts/#{node.text}") + %(#{node.text}) + else + %(#{node.attributes['guard']}(#{node.text})) end + end - def inline_callout node - if node.document.attr? 'icons', 'font' - %((#{node.text})) - elsif node.document.attr? 'icons' - src = node.icon_uri("callouts/#{node.text}") - %(#{node.text}) + def convert_inline_footnote node + if (index = node.attr 'index') + if node.type == :xref + %([#{index}]) else - %((#{node.text})) + id_attr = node.id ? %( id="_footnote_#{node.id}") : '' + %([#{index}]) end + elsif node.type == :xref + %([#{node.text}]) end + end - def inline_footnote node - if (index = node.attr 'index') - if node.type == :xref - %([#{index}]) - else - id_attr = node.id ? %( id="_footnote_#{node.id}") : nil - %([#{index}]) + def convert_inline_image node + if (type = node.type || 'image') == 'icon' && (node.document.attr? 'icons', 'font') + class_attr_val = %(fa fa-#{node.target}) + { 'size' => 'fa-', 'rotate' => 'fa-rotate-', 'flip' => 'fa-flip-' }.each do |key, prefix| + class_attr_val = %(#{class_attr_val} #{prefix}#{node.attr key}) if node.attr? key + end + title_attr = (node.attr? 'title') ? %( title="#{node.attr 'title'}") : '' + img = %() + elsif type == 'icon' && !(node.document.attr? 'icons') + img = %([#{node.alt}]) + else + target = node.target + attrs = ['width', 'height', 'title'].map {|name| (node.attr? name) ? %( #{name}="#{node.attr name}") : '' }.join + if type != 'icon' && ((node.attr? 'format', 'svg') || (target.include? 
'.svg')) && + node.document.safe < SafeMode::SECURE && ((svg = (node.option? 'inline')) || (obj = (node.option? 'interactive'))) + if svg + img = (read_svg_contents node, target) || %(#{node.alt}) + elsif obj + fallback = (node.attr? 'fallback') ? %(#{encode_attribute_value node.alt}) : %(#{node.alt}) + img = %(#{fallback}) end - elsif node.type == :xref - %([#{node.text}]) end + img ||= %(#{encode_attribute_value node.alt}) end - - def inline_image node - if (type = node.type) == 'icon' && (node.document.attr? 'icons', 'font') - class_attr_val = %(fa fa-#{node.target}) - {'size' => 'fa-', 'rotate' => 'fa-rotate-', 'flip' => 'fa-flip-'}.each do |key, prefix| - class_attr_val = %(#{class_attr_val} #{prefix}#{node.attr key}) if node.attr? key - end - title_attr = (node.attr? 'title') ? %( title="#{node.attr 'title'}") : nil - img = %() - elsif type == 'icon' && !(node.document.attr? 'icons') - img = %([#{node.attr 'alt'}]) - else - target = node.target - attrs = ['width', 'height', 'title'].map {|name| (node.attr? name) ? %( #{name}="#{node.attr name}") : nil }.join - if type != 'icon' && ((node.attr? 'format', 'svg', false) || (target.include? '.svg')) && - node.document.safe < SafeMode::SECURE && ((svg = (node.option? 'inline')) || (obj = (node.option? 'interactive'))) - if svg - img = (read_svg_contents node, target) || %(#{node.attr 'alt'}) - elsif obj - fallback = (node.attr? 'fallback') ? %(#{node.attr 'alt'}) : %(#{node.attr 'alt'}) - img = %(#{fallback}) - end - end - img ||= %(#{node.attr 'alt'}) - end - if node.attr? 'link' - window_attr = (node.attr? 'window') ? %( target="#{node.attr 'window'}") : nil - img = %(#{img}) + if node.attr? 'link' + img = %(#{img}) + end + if (role = node.role) + if node.attr? 'float' + class_attr_val = %(#{type} #{node.attr 'float'} #{role}) + else + class_attr_val = %(#{type} #{role}) end - class_attr_val = (role = node.role) ? %(#{type} #{role}) : type - style_attr = (node.attr? 'float') ? %( style="float: #{node.attr 'float'}") : nil - %(#{img}) + elsif node.attr? 'float' + class_attr_val = %(#{type} #{node.attr 'float'}) + else + class_attr_val = type end + %(#{img}) + end + + def convert_inline_indexterm node + node.type == :visible ? node.text : '' + end - def inline_indexterm node - node.type == :visible ? node.text : '' + def convert_inline_kbd node + if (keys = node.attr 'keys').size == 1 + %(#{keys[0]}) + else + %(#{keys.join '+'}) end + end - def inline_kbd node - if (keys = node.attr 'keys').size == 1 - %(#{keys[0]}) + def convert_inline_menu node + caret = (node.document.attr? 'icons', 'font') ? '  ' : '  ' + submenu_joiner = %(#{caret}) + menu = node.attr 'menu' + if (submenus = node.attr 'submenus').empty? + if (menuitem = node.attr 'menuitem') + %(#{menu}#{caret}#{menuitem}) else - key_combo = keys.map {|key| %(#{key}+) }.join.chop - %(#{key_combo}) + %(#{menu}) end + else + %(#{menu}#{caret}#{submenus.join submenu_joiner}#{caret}#{node.attr 'menuitem'}) end + end - def inline_menu node - menu = node.attr 'menu' - if !(submenus = node.attr 'submenus').empty? - submenu_path = submenus.map {|submenu| %(#{submenu} ▸ ) }.join.chop - %(#{menu} ▸ #{submenu_path} #{node.attr 'menuitem'}) - elsif (menuitem = node.attr 'menuitem') - %(#{menu} ▸ #{menuitem}) + def convert_inline_quoted node + open, close, tag = QUOTE_TAGS[node.type] + if node.id + class_attr = node.role ? 
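convert_inline_quoted, which begins just above and continues below, splices the id and role into the opening tag by chopping its trailing > and re-appending it after the extra attributes. Assuming the QUOTE_TAGS entry for :emphasis yields <em>/</em> with the tag flag set (an assumption; the real table lives elsewhere in the converter), the trick amounts to:

    open, close = '<em>', '</em>'   # assumed QUOTE_TAGS entry for :emphasis
    %(#{open.chop} id="ref1" class="term">emphasized#{close})
    #=> "<em id=\"ref1\" class=\"term\">emphasized</em>"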
%( class="#{node.role}") : '' + if tag + %(#{open.chop} id="#{node.id}"#{class_attr}>#{node.text}#{close}) + else + %(#{open}#{node.text}#{close}) + end + elsif node.role + if tag + %(#{open.chop} class="#{node.role}">#{node.text}#{close}) else - %(#{menu}) + %(#{open}#{node.text}#{close}) end + else + %(#{open}#{node.text}#{close}) end + end - def inline_quoted node - open, close, is_tag = QUOTE_TAGS[node.type] - if (role = node.role) - if is_tag - quoted_text = %(#{open.chop} class="#{role}">#{node.text}#{close}) - else - quoted_text = %(#{open}#{node.text}#{close}) + # NOTE expose read_svg_contents for Bespoke converter + def read_svg_contents node, target + if (svg = node.read_contents target, start: (node.document.attr 'imagesdir'), normalize: true, label: 'SVG') + svg = svg.sub SvgPreambleRx, '' unless svg.start_with? ') end - else - quoted_text = %(#{open}#{node.text}#{close}) end - - node.id ? %(#{quoted_text}) : quoted_text + svg = %(#{new_start_tag}#{svg[old_start_tag.length..-1]}) if new_start_tag end + svg + end - def append_boolean_attribute name, xml - xml ? %( #{name}="#{name}") : %( #{name}) + private + + def append_boolean_attribute name, xml + xml ? %( #{name}="#{name}") : %( #{name}) + end + + def append_link_constraint_attrs node, attrs = [] + rel = 'nofollow' if node.option? 'nofollow' + if (window = node.attributes['window']) + attrs << %( target="#{window}") + attrs << (rel ? %( rel="#{rel} noopener") : ' rel="noopener"') if window == '_blank' || (node.option? 'noopener') + elsif rel + attrs << %( rel="#{rel}") end + attrs + end - def read_svg_contents node, target - if (svg = node.read_contents target, :start => (node.document.attr 'imagesdir'), :normalize => true, :label => 'SVG') - svg = svg.sub SvgPreambleRx, '' unless svg.start_with? ') - end - end - svg = %(#{new_start_tag}#{svg[old_start_tag.length..-1]}) if new_start_tag - end - svg + def encode_attribute_value val + (val.include? '"') ? (val.gsub '"', '"') : val + end + + def generate_manname_section node + manname_title = node.attr 'manname-title', 'Name' + if (next_section = node.sections[0]) && (next_section_title = next_section.title) == next_section_title.upcase + manname_title = manname_title.upcase end + manname_id_attr = (manname_id = node.attr 'manname-id') ? %( id="#{manname_id}") : '' + %(#{manname_title} +
    +

    #{node.attr 'manname'} - #{node.attr 'manpurpose'}

    +
    ) end + + # NOTE adapt to older converters that relied on unprefixed method names + def method_missing id, *params + !((name = id.to_s).start_with? 'convert_') && (handles? name) ? (send %(convert_#{name}), *params) : super + end +end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/manpage.rb asciidoctor-2.0.10/lib/asciidoctor/converter/manpage.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/manpage.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/manpage.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,687 +1,739 @@ +# frozen_string_literal: true module Asciidoctor - # A built-in {Converter} implementation that generates the man page (troff) format. - # - # The output follows the groff man page definition while also trying to be - # consistent with the output produced by the a2x tool from AsciiDoc Python. - # - # See http://www.gnu.org/software/groff/manual/html_node/Man-usage.html#Man-usage - class Converter::ManPageConverter < Converter::BuiltIn - LF = %(\n) - TAB = %(\t) - WHITESPACE = %(#{LF}#{TAB} ) - ET = ' ' * 8 - ESC = %(\u001b) # troff leader marker - ESC_BS = %(#{ESC}\\) # escaped backslash (indicates troff formatting sequence) - ESC_FS = %(#{ESC}.) # escaped full stop (indicates troff macro) - - LiteralBackslashRx = /(?:\A|[^#{ESC}])\\/ - LeadingPeriodRx = /^\./ - EscapedMacroRx = /^(?:#{ESC}\\c\n)?#{ESC}\.((?:URL|MTO) ".*?" ".*?" )( |[^\s]*)(.*?)(?: *#{ESC}\\c)?$/ - MockBoundaryRx = /<\/?BOUNDARY>/ - EmDashCharRefRx = /—(?:;​)?/ - EllipsisCharRefRx = /…(?:​)?/ - - # Converts HTML entity references back to their original form, escapes - # special man characters and strips trailing whitespace. - # - # It's crucial that text only ever pass through manify once. - # - # str - the String to convert - # opts - an Hash of options to control processing (default: {}) - # * :preserve_space a Boolean that indicates whether to preserve spaces (only expanding tabs) if true - # or to collapse all adjacent whitespace to a single space if false (default: true) - # * :append_newline a Boolean that indicates whether to append an endline to the result (default: false) - def manify str, opts = {} - str = ((opts.fetch :preserve_space, true) ? (str.gsub TAB, ET) : (str.tr_s WHITESPACE, ' ')). - gsub(LiteralBackslashRx, '\&(rs'). # literal backslash (not a troff escape sequence) - gsub(LeadingPeriodRx, '\\\&.'). # leading . is used in troff for macro call or other formatting; replace with \&. - # drop orphaned \c escape lines, unescape troff macro, quote adjacent character, isolate macro line - gsub(EscapedMacroRx) { (rest = $3.lstrip).empty? ? %(.#$1"#$2") : %(.#$1"#$2"#{LF}#{rest}) }. - gsub('-', '\-'). - gsub('<', '<'). - gsub('>', '>'). - gsub(' ', '\~'). # non-breaking space - gsub('©', '\(co'). # copyright sign - gsub('®', '\(rg'). # registered sign - gsub('™', '\(tm'). # trademark sign - gsub(' ', ' '). # thin space - gsub('–', '\(en'). # en dash - gsub(EmDashCharRefRx, '\(em'). # em dash - gsub('‘', '\(oq'). # left single quotation mark - gsub('’', '\(cq'). # right single quotation mark - gsub('“', '\(lq'). # left double quotation mark - gsub('”', '\(rq'). # right double quotation mark - gsub(EllipsisCharRefRx, '...'). # horizontal ellipsis - gsub('←', '\(<-'). # leftwards arrow - gsub('→', '\(->'). # rightwards arrow - gsub('⇐', '\(lA'). # leftwards double arrow - gsub('⇒', '\(rA'). # rightwards double arrow - gsub('​', '\:'). # zero width space - gsub('\'', '\(aq'). # apostrophe-quote - gsub(MockBoundaryRx, ''). 
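The method_missing hook added above keeps the converter compatible with code that still calls the pre-2.0, unprefixed handler names (for example converter.paragraph node instead of converter.convert_paragraph node). The same dispatch pattern in isolation, as a toy class that is not part of Asciidoctor:

    class LegacyNames
      def convert_paragraph text
        %(<p>#{text}</p>)
      end

      def handles? name
        respond_to? %(convert_#{name})
      end

      # route old-style names to the convert_-prefixed methods
      def method_missing id, *params
        !((name = id.to_s).start_with? 'convert_') && (handles? name) ? (send %(convert_#{name}), *params) : super
      end

      def respond_to_missing? id, include_private = false
        (handles? id) || super
      end
    end

    LegacyNames.new.paragraph 'hello' #=> "<p>hello</p>"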
# mock boundary - gsub(ESC_BS, '\\'). # unescape troff backslash (NOTE update if more escapes are added) - rstrip # strip trailing space - opts[:append_newline] ? %(#{str}#{LF}) : str - end - - def skip_with_warning node, name = nil - warn %(asciidoctor: WARNING: converter missing for #{name || node.node_name} node in manpage backend) - nil - end +# A built-in {Converter} implementation that generates the man page (troff) format. +# +# The output follows the groff man page definition while also trying to be +# consistent with the output produced by the a2x tool from AsciiDoc Python. +# +# See http://www.gnu.org/software/groff/manual/html_node/Man-usage.html#Man-usage +class Converter::ManPageConverter < Converter::Base + register_for 'manpage' + + WHITESPACE = %(#{LF}#{TAB} ) + ET = ' ' * 8 + ESC = ?\u001b # troff leader marker + ESC_BS = %(#{ESC}\\) # escaped backslash (indicates troff formatting sequence) + ESC_FS = %(#{ESC}.) # escaped full stop (indicates troff macro) + + LiteralBackslashRx = /(?:\A|[^#{ESC}])\\/ + LeadingPeriodRx = /^\./ + EscapedMacroRx = /^(?:#{ESC}\\c\n)?#{ESC}\.((?:URL|MTO) "#{CC_ANY}*?" "#{CC_ANY}*?" )( |[^\s]*)(#{CC_ANY}*?)(?: *#{ESC}\\c)?$/ + MockBoundaryRx = /<\/?BOUNDARY>/ + EmDashCharRefRx = /—(?:​)?/ + EllipsisCharRefRx = /…(?:​)?/ + WrappedIndentRx = /#{CG_BLANK}*#{LF}#{CG_BLANK}*/ + + def initialize backend, opts = {} + @backend = backend + init_backend_traits basebackend: 'manpage', filetype: 'man', outfilesuffix: '.man', supports_templates: true + end - def document node - unless node.attr? 'mantitle' - raise 'asciidoctor: ERROR: doctype must be set to manpage when using manpage backend' - end - mantitle = node.attr 'mantitle' - manvolnum = node.attr 'manvolnum', '1' - manname = node.attr 'manname', mantitle - docdate = (node.attr? 'reproducible') ? nil : (node.attr 'docdate') - # NOTE the first line enables the table (tbl) preprocessor, necessary for non-Linux systems - result = [%('\\" t + def convert_document node + unless node.attr? 'mantitle' + raise 'asciidoctor: ERROR: doctype must be set to manpage when using manpage backend' + end + mantitle = node.attr 'mantitle' + manvolnum = node.attr 'manvolnum', '1' + manname = node.attr 'manname', mantitle + manmanual = node.attr 'manmanual' + mansource = node.attr 'mansource' + docdate = (node.attr? 'reproducible') ? nil : (node.attr 'docdate') + # NOTE the first line enables the table (tbl) preprocessor, necessary for non-Linux systems + result = [%('\\" t .\\" Title: #{mantitle} -.\\" Author: #{(node.attr? 'authors') ? (node.attr 'authors') : '[see the "AUTHORS" section]'} +.\\" Author: #{(node.attr? 'authors') ? (node.attr 'authors') : '[see the "AUTHOR(S)" section]'} .\\" Generator: Asciidoctor #{node.attr 'asciidoctor-version'})] - result << %(.\\" Date: #{docdate}) if docdate - result << %(.\\" Manual: #{(manual = node.attr 'manmanual') || '\ \&'} -.\\" Source: #{(source = node.attr 'mansource') || '\ \&'} + result << %(.\\" Date: #{docdate}) if docdate + result << %(.\\" Manual: #{manmanual ? (manmanual.tr_s WHITESPACE, ' ') : '\ \&'} +.\\" Source: #{mansource ? (mansource.tr_s WHITESPACE, ' ') : '\ \&'} .\\" Language: English .\\") - # TODO add document-level setting to disable capitalization of manname - result << %(.TH "#{manify manname.upcase}" "#{manvolnum}" "#{docdate}" "#{source ? (manify source) : '\ \&'}" "#{manual ? 
(manify manual) : '\ \&'}") - # define portability settings - # see http://bugs.debian.org/507673 - # see http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html - result << '.ie \n(.g .ds Aq \(aq' - result << '.el .ds Aq \'' - # set sentence_space_size to 0 to prevent extra space between sentences separated by a newline - # the alternative is to add \& at the end of the line - result << '.ss \n[.ss] 0' - # disable hyphenation - result << '.nh' - # disable justification (adjust text to left margin only) - result << '.ad l' - # define URL macro for portability - # see http://web.archive.org/web/20060102165607/http://people.debian.org/~branden/talks/wtfm/wtfm.pdf - # - # Use: .URL "http://www.debian.org" "Debian" "." - # - # * First argument: the URL - # * Second argument: text to be hyperlinked - # * Third (optional) argument: text that needs to immediately trail - # the hyperlink without intervening whitespace - result << '.de URL -\\\\$2 \(laURL: \\\\$1 \(ra\\\\$3 + # TODO add document-level setting to disable capitalization of manname + result << %(.TH "#{manify manname.upcase}" "#{manvolnum}" "#{docdate}" "#{mansource ? (manify mansource) : '\ \&'}" "#{manmanual ? (manify manmanual) : '\ \&'}") + # define portability settings + # see http://bugs.debian.org/507673 + # see http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html + result << '.ie \n(.g .ds Aq \(aq' + result << '.el .ds Aq \'' + # set sentence_space_size to 0 to prevent extra space between sentences separated by a newline + # the alternative is to add \& at the end of the line + result << '.ss \n[.ss] 0' + # disable hyphenation + result << '.nh' + # disable justification (adjust text to left margin only) + result << '.ad l' + # define URL macro for portability + # see http://web.archive.org/web/20060102165607/http://people.debian.org/~branden/talks/wtfm/wtfm.pdf + # + # Usage + # + # .URL "http://www.debian.org" "Debian" "." + # + # * First argument: the URL + # * Second argument: text to be hyperlinked + # * Third (optional) argument: text that needs to immediately trail the hyperlink without intervening whitespace + result << '.de URL +\\fI\\\\$2\\fP <\\\\$1>\\\\$3 .. -.if \n[.g] .mso www.tmac' - result << %(.LINKSTYLE #{node.attr 'man-linkstyle', 'blue R < >'}) +.als MTO URL +.if \n[.g] \{\ +. mso www.tmac +. am URL +. ad l +. . +. am MTO +. ad l +. .' + result << %(. LINKSTYLE #{node.attr 'man-linkstyle', 'blue R < >'}) + result << '.\}' + + unless node.noheader + if node.attr? 'manpurpose' + mannames = node.attr 'mannames', [manname] + result << %(.SH "#{(node.attr 'manname-title', 'NAME').upcase}" +#{mannames.map {|n| manify n }.join ', '} \\- #{manify node.attr('manpurpose'), whitespace: :normalize}) + end + end - unless node.noheader - if node.attr? 'manpurpose' - result << %(.SH "#{node.attr 'manname-title'}" -#{manify mantitle} \\- #{manify node.attr 'manpurpose'}) + result << node.content + + # QUESTION should NOTES come after AUTHOR(S)? + if node.footnotes? && !(node.attr? 'nofootnotes') + result << '.SH "NOTES"' + result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) + end + + unless (authors = node.authors).empty? + if authors.size > 1 + result << '.SH "AUTHORS"' + authors.each do |author| + result << %(.sp +#{author.name}) end + else + result << %(.SH "AUTHOR" +.sp +#{authors[0].name}) end + end - result << node.content - - # QUESTION should NOTES come after AUTHOR(S)? - if node.footnotes? && !(node.attr? 
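As the guard at the top of convert_document enforces, the manpage backend only accepts documents whose doctype is manpage and that define a mantitle. A minimal sketch of driving it from Ruby; the file name, attribute values, and document body are made up:

    require 'asciidoctor'

    source = <<~'ADOC'
    = progname(1)
    :doctype: manpage
    :manmanual: Progname Manual
    :mansource: progname 1.0

    == Name

    progname - do one thing well

    == Synopsis

    *progname* [_OPTION_]... _FILE_...
    ADOC

    troff = Asciidoctor.convert source, backend: 'manpage', standalone: true
    File.write 'progname.1', troff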
'nofootnotes') - result << '.SH "NOTES"' - result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) - end + result.join LF + end - # FIXME detect single author and use appropriate heading; itemize the authors if multiple - if node.attr? 'authors' - result << %(.SH "AUTHOR(S)" -.sp -\\fB#{node.attr 'authors'}\\fP -.RS 4 -Author(s). -.RE) - end + # NOTE embedded doesn't really make sense in the manpage backend + def convert_embedded node + result = [node.content] - result * LF + if node.footnotes? && !(node.attr? 'nofootnotes') + result << '.SH "NOTES"' + result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) end - # NOTE embedded doesn't really make sense in the manpage backend - def embedded node - result = [node.content] - - if node.footnotes? && !(node.attr? 'nofootnotes') - result << '.SH "NOTES"' - result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) - end + # QUESTION should we add an AUTHOR(S) section? - # QUESTION should we add an AUTHOR(S) section? + result.join LF + end - result * LF + def convert_section node + result = [] + if node.level > 1 + macro = 'SS' + # QUESTION why captioned title? why not when level == 1? + stitle = node.captioned_title + else + macro = 'SH' + stitle = node.title.upcase end - - def section node - slevel = node.level - # QUESTION should the check for slevel be done in section? - slevel = 1 if slevel == 0 && node.special - result = [] - if slevel > 1 - macro = 'SS' - # QUESTION why captioned title? why not for slevel == 1? - stitle = node.captioned_title - else - macro = 'SH' - stitle = node.title.upcase - end - result << %(.#{macro} "#{manify stitle}" + result << %(.#{macro} "#{manify stitle}" #{node.content}) - result * LF - end + result.join LF + end - def admonition node - result = [] - result << %(.if n \\{\\ -.sp -.\\} + def convert_admonition node + result = [] + result << %(.if n .sp .RS 4 .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .ps +1 -.B #{node.caption}#{node.title? ? "\\fP #{manify node.title}" : nil} +.B #{node.attr 'textlabel'}#{node.title? ? "\\fP: #{manify node.title}" : ''} .ps -1 .br -#{resolve_content node} +#{enclose_content node} .sp .5v .RE) - result * LF - end - - alias :audio :skip_with_warning + result.join LF + end - def colist node - result = [] - result << %(.sp + def convert_colist node + result = [] + result << %(.sp .B #{manify node.title} .br) if node.title? - result << '.TS + result << '.TS tab(:); r lw(\n(.lu*75u/100u).' - node.items.each_with_index do |item, index| - result << %(\\fB(#{index + 1})\\fP\\h'-2n':T{ -#{manify item.text} -T}) - end - result << '.TE' - result * LF + num = 0 + node.items.each do |item| + result << %(\\fB(#{num += 1})\\fP\\h'-2n':T{) + result << (manify item.text, whitespace: :normalize) + result << item.content if item.blocks? + result << 'T}' end + result << '.TE' + result.join LF + end - # TODO implement title for dlist - # TODO implement horizontal (if it makes sense) - def dlist node - result = [] - counter = 0 - node.items.each do |terms, dd| - counter += 1 - case node.style - when 'qanda' - result << %(.sp -#{counter}. #{manify([*terms].map {|dt| dt.text }.join ' ')} + # TODO implement horizontal (if it makes sense) + def convert_dlist node + result = [] + result << %(.sp +.B #{manify node.title} +.br) if node.title? + counter = 0 + node.items.each do |terms, dd| + counter += 1 + case node.style + when 'qanda' + result << %(.sp +#{counter}. 
#{manify terms.map {|dt| dt.text }.join ' '} .RS 4) - else - result << %(.sp -#{manify([*terms].map {|dt| dt.text }.join ', ')} + else + result << %(.sp +#{manify terms.map {|dt| dt.text }.join(', '), whitespace: :normalize} .RS 4) - end - if dd - result << (manify dd.text) if dd.text? - result << dd.content if dd.blocks? - end - result << '.RE' end - result * LF + if dd + result << (manify dd.text, whitespace: :normalize) if dd.text? + result << dd.content if dd.blocks? + end + result << '.RE' end + result.join LF + end - def example node - result = [] - result << %(.sp + def convert_example node + result = [] + result << (node.title? ? %(.sp .B #{manify node.captioned_title} -.br) if node.title? - result << %(.RS 4 -#{resolve_content node} +.br) : '.sp') + result << %(.RS 4 +#{enclose_content node} .RE) - result * LF - end + result.join LF + end - def floating_title node - %(.SS "#{manify node.title}") - end + def convert_floating_title node + %(.SS "#{manify node.title}") + end - alias :image :skip_with_warning + def convert_image node + result = [] + result << (node.title? ? %(.sp +.B #{manify node.captioned_title} +.br) : '.sp') + result << %([#{node.alt}]) + result.join LF + end - def listing node - result = [] - result << %(.sp + def convert_listing node + result = [] + result << %(.sp .B #{manify node.captioned_title} .br) if node.title? - result << %(.sp -.if n \\{\\ -.RS 4 -.\\} + result << %(.sp +.if n .RS 4 .nf -#{manify node.content} +#{manify node.content, whitespace: :preserve} .fi -.if n \\{\\ -.RE -.\\}) - result * LF - end +.if n .RE) + result.join LF + end - def literal node - result = [] - result << %(.sp + def convert_literal node + result = [] + result << %(.sp .B #{manify node.title} .br) if node.title? - result << %(.sp -.if n \\{\\ -.RS 4 -.\\} + result << %(.sp +.if n .RS 4 .nf -#{manify node.content} +#{manify node.content, whitespace: :preserve} .fi -.if n \\{\\ -.RE -.\\}) - result * LF - end +.if n .RE) + result.join LF + end - def olist node - result = [] - result << %(.sp + def convert_sidebar node + result = [] + result << (node.title? ? %(.sp +.B #{manify node.title} +.br) : '.sp') + result << %(.RS 4 +#{enclose_content node} +.RE) + result.join LF + end + + def convert_olist node + result = [] + result << %(.sp .B #{manify node.title} .br) if node.title? - node.items.each_with_index do |item, idx| - result << %(.sp + node.items.each_with_index do |item, idx| + result << %(.sp .RS 4 .ie n \\{\\ \\h'-04' #{idx + 1}.\\h'+01'\\c .\\} .el \\{\\ -.sp -1 -.IP " #{idx + 1}." 4.2 +. sp -1 +. IP " #{idx + 1}." 4.2 .\\} -#{manify item.text}) - result << item.content if item.blocks? - result << '.RE' - end - result * LF +#{manify item.text, whitespace: :normalize}) + result << item.content if item.blocks? + result << '.RE' end + result.join LF + end - def open node - case node.style - when 'abstract', 'partintro' - resolve_content node - else - node.content - end + def convert_open node + case node.style + when 'abstract', 'partintro' + enclose_content node + else + node.content end + end - # TODO use Page Control https://www.gnu.org/software/groff/manual/html_node/Page-Control.html#Page-Control - alias :page_break :skip + # TODO use Page Control https://www.gnu.org/software/groff/manual/html_node/Page-Control.html#Page-Control + alias convert_page_break skip - def paragraph node - if node.title? - %(.sp + def convert_paragraph node + if node.title? 
+ %(.sp .B #{manify node.title} .br -#{manify node.content}) - else - %(.sp -#{manify node.content}) - end +#{manify node.content, whitespace: :normalize}) + else + %(.sp +#{manify node.content, whitespace: :normalize}) end + end - alias :preamble :content + alias convert_pass content_only + alias convert_preamble content_only - def quote node - result = [] - if node.title? - result << %(.sp -.in +.3i + def convert_quote node + result = [] + if node.title? + result << %(.sp +.RS 3 .B #{manify node.title} .br -.in) - end - attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil - attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil - result << %(.in +.3i -.ll -.3i -.nf -#{resolve_content node} -.fi +.RE) + end + attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil + attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil + result << %(.RS 3 +.ll -.6i +#{enclose_content node} .br -.in +.RE .ll) - if attribution_line - result << %(.in +.5i -.ll -.5i + if attribution_line + result << %(.RS 5 +.ll -.10i #{attribution_line} -.in +.RE .ll) - end - result * LF end + result.join LF + end - alias :sidebar :skip_with_warning - - def stem node - title_element = node.title? ? %(.sp + def convert_stem node + result = [] + result << (node.title? ? %(.sp .B #{manify node.title} -.br) : nil - open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] - - unless ((equation = node.content).start_with? open) && (equation.end_with? close) - equation = %(#{open}#{equation}#{close}) - end - - %(#{title_element}#{equation}) +.br) : '.sp') + open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] + if ((equation = node.content).start_with? open) && (equation.end_with? close) + equation = equation.slice open.length, equation.length - open.length - close.length end + result << %(#{manify equation, whitespace: :preserve} (#{node.style})) + result.join LF + end - # FIXME: The reason this method is so complicated is because we are not - # receiving empty(marked) cells when there are colspans or rowspans. This - # method has to create a map of all cells and in the case of rowspans - # create empty cells as placeholders of the span. - # To fix this, asciidoctor needs to provide an API to tell the user if a - # given cell is being used as a colspan or rowspan. - def table node - result = [] - if node.title? - result << %(.sp + # FIXME: The reason this method is so complicated is because we are not + # receiving empty(marked) cells when there are colspans or rowspans. This + # method has to create a map of all cells and in the case of rowspans + # create empty cells as placeholders of the span. + # To fix this, asciidoctor needs to provide an API to tell the user if a + # given cell is being used as a colspan or rowspan. + def convert_table node + result = [] + if node.title? 
+ result << %(.sp .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br -.B #{manify node.captioned_title}) - end - result << '.TS +.B #{manify node.captioned_title} +) + end + result << '.TS allbox tab(:);' - row_header = [] - row_text = [] - row_index = 0 - [:head, :body, :foot].each do |tsec| - node.rows[tsec].each do |row| - row_header[row_index] ||= [] - row_text[row_index] ||= [] - # result << LF - # l left-adjusted - # r right-adjusted - # c centered-adjusted - # n numerical align - # a alphabetic align - # s spanned - # ^ vertically spanned - remaining_cells = row.size - row.each_with_index do |cell, cell_index| - remaining_cells -= 1 - row_header[row_index][cell_index] ||= [] - # Add an empty cell if this is a rowspan cell - if row_header[row_index][cell_index] == ['^t'] - row_text[row_index] << %(T{#{LF}.sp#{LF}T}:) + row_header = [] + row_text = [] + row_index = 0 + node.rows.to_h.each do |tsec, rows| + rows.each do |row| + row_header[row_index] ||= [] + row_text[row_index] ||= [] + # result << LF + # l left-adjusted + # r right-adjusted + # c centered-adjusted + # n numerical align + # a alphabetic align + # s spanned + # ^ vertically spanned + remaining_cells = row.size + row.each_with_index do |cell, cell_index| + remaining_cells -= 1 + row_header[row_index][cell_index] ||= [] + # Add an empty cell if this is a rowspan cell + if row_header[row_index][cell_index] == ['^t'] + row_text[row_index] << %(T{#{LF}.sp#{LF}T}:) + end + row_text[row_index] << %(T{#{LF}.sp#{LF}) + cell_halign = (cell.attr 'halign', 'left').chr + if tsec == :head + if row_header[row_index].empty? || row_header[row_index][cell_index].empty? + row_header[row_index][cell_index] << %(#{cell_halign}tB) + else + row_header[row_index][cell_index + 1] ||= [] + row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end - row_text[row_index] << %(T{#{LF}.sp#{LF}) - cell_halign = (cell.attr 'halign', 'left')[0..0] - if tsec == :head - if row_header[row_index].empty? || - row_header[row_index][cell_index].empty? - row_header[row_index][cell_index] << %(#{cell_halign}tB) - else - row_header[row_index][cell_index + 1] ||= [] - row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) - end - row_text[row_index] << %(#{cell.text}#{LF}) - elsif tsec == :body - if row_header[row_index].empty? || - row_header[row_index][cell_index].empty? - row_header[row_index][cell_index] << %(#{cell_halign}t) - else - row_header[row_index][cell_index + 1] ||= [] - row_header[row_index][cell_index + 1] << %(#{cell_halign}t) - end - case cell.style - when :asciidoc - cell_content = cell.content - when :verse, :literal - cell_content = cell.text - else - cell_content = cell.content.join - end - row_text[row_index] << %(#{cell_content}#{LF}) - elsif tsec == :foot - if row_header[row_index].empty? || - row_header[row_index][cell_index].empty? - row_header[row_index][cell_index] << %(#{cell_halign}tB) - else - row_header[row_index][cell_index + 1] ||= [] - row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) - end - row_text[row_index] << %(#{cell.text}#{LF}) + row_text[row_index] << %(#{manify cell.text, whitespace: :normalize}#{LF}) + elsif tsec == :body + if row_header[row_index].empty? || row_header[row_index][cell_index].empty? + row_header[row_index][cell_index] << %(#{cell_halign}t) + else + row_header[row_index][cell_index + 1] ||= [] + row_header[row_index][cell_index + 1] << %(#{cell_halign}t) end - if cell.colspan && cell.colspan > 1 - (cell.colspan - 1).times do |i| - if row_header[row_index].empty? 
|| - row_header[row_index][cell_index].empty? - row_header[row_index][cell_index + i] << 'st' - else - row_header[row_index][cell_index + 1 + i] ||= [] - row_header[row_index][cell_index + 1 + i] << 'st' - end - end + case cell.style + when :asciidoc + cell_content = cell.content + when :literal + cell_content = %(.nf#{LF}#{manify cell.text, whitespace: :preserve}#{LF}.fi) + else + cell_content = manify cell.content.join, whitespace: :normalize + end + row_text[row_index] << %(#{cell_content}#{LF}) + elsif tsec == :foot + if row_header[row_index].empty? || row_header[row_index][cell_index].empty? + row_header[row_index][cell_index] << %(#{cell_halign}tB) + else + row_header[row_index][cell_index + 1] ||= [] + row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end - if cell.rowspan && cell.rowspan > 1 - (cell.rowspan - 1).times do |i| - row_header[row_index + 1 + i] ||= [] - if row_header[row_index + 1 + i].empty? || - row_header[row_index + 1 + i][cell_index].empty? - row_header[row_index + 1 + i][cell_index] ||= [] - row_header[row_index + 1 + i][cell_index] << '^t' - else - row_header[row_index + 1 + i][cell_index + 1] ||= [] - row_header[row_index + 1 + i][cell_index + 1] << '^t' - end + row_text[row_index] << %(#{manify cell.text, whitespace: :normalize}#{LF}) + end + if cell.colspan && cell.colspan > 1 + (cell.colspan - 1).times do |i| + if row_header[row_index].empty? || row_header[row_index][cell_index].empty? + row_header[row_index][cell_index + i] << 'st' + else + row_header[row_index][cell_index + 1 + i] ||= [] + row_header[row_index][cell_index + 1 + i] << 'st' end end - if remaining_cells >= 1 - row_text[row_index] << 'T}:' - else - row_text[row_index] << %(T}#{LF}) + end + if cell.rowspan && cell.rowspan > 1 + (cell.rowspan - 1).times do |i| + row_header[row_index + 1 + i] ||= [] + if row_header[row_index + 1 + i].empty? || row_header[row_index + 1 + i][cell_index].empty? + row_header[row_index + 1 + i][cell_index] ||= [] + row_header[row_index + 1 + i][cell_index] << '^t' + else + row_header[row_index + 1 + i][cell_index + 1] ||= [] + row_header[row_index + 1 + i][cell_index + 1] << '^t' + end end end - row_index += 1 + if remaining_cells >= 1 + row_text[row_index] << 'T}:' + else + row_text[row_index] << %(T}#{LF}) + end end - end + row_index += 1 + end unless rows.empty? + end - #row_header.each do |row| - # result << LF - # row.each_with_index do |cell, i| - # result << (cell.join ' ') - # result << ' ' if row.size > i + 1 - # end - #end - # FIXME temporary fix to get basic table to display - result << LF - result << row_header.first.map {|r| 'lt'}.join(' ') - - result << %(.#{LF}) - row_text.each do |row| - result << row.join - end - result << %(.TE#{LF}.sp) - result.join + #row_header.each do |row| + # result << LF + # row.each_with_index do |cell, i| + # result << (cell.join ' ') + # result << ' ' if row.size > i + 1 + # end + #end + # FIXME temporary fix to get basic table to display + result << LF + result << ('lt ' * row_header[0].size).chop + + result << %(.#{LF}) + row_text.each do |row| + result << row.join end + result << %(.TE#{LF}.sp) + result.join + end - def thematic_break node - '.sp + def convert_thematic_break node + '.sp .ce \l\'\n(.lu*25u/100u\(ap\'' - end + end - alias :toc :skip + alias convert_toc skip - def ulist node - result = [] - result << %(.sp + def convert_ulist node + result = [] + result << %(.sp .B #{manify node.title} .br) if node.title? 
- node.items.map {|item| - result << %[.sp + node.items.map do |item| + result << %[.sp .RS 4 .ie n \\{\\ \\h'-04'\\(bu\\h'+03'\\c .\\} .el \\{\\ -.sp -1 -.IP \\(bu 2.3 +. sp -1 +. IP \\(bu 2.3 .\\} -#{manify item.text}] - result << item.content if item.blocks? - result << '.RE' - } - result * LF +#{manify item.text, whitespace: :normalize}] + result << item.content if item.blocks? + result << '.RE' end + result.join LF + end - # FIXME git uses [verse] for the synopsis; detect this special case - def verse node - result = [] - if node.title? - result << %(.sp + # FIXME git uses [verse] for the synopsis; detect this special case + def convert_verse node + result = [] + result << (node.title? ? %(.sp .B #{manify node.title} -.br) - end - attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil - attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil - result << %(.sp +.br) : '.sp') + attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil + attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil + result << %(.sp .nf -#{manify node.content} +#{manify node.content, whitespace: :preserve} .fi .br) - if attribution_line - result << %(.in +.5i + if attribution_line + result << %(.in +.5i .ll -.5i #{attribution_line} .in .ll) - end - result * LF end + result.join LF + end - def video node - start_param = (node.attr? 'start', nil, false) ? %(&start=#{node.attr 'start'}) : nil - end_param = (node.attr? 'end', nil, false) ? %(&end=#{node.attr 'end'}) : nil - %(.sp -#{manify node.captioned_title} (video) <#{node.media_uri(node.attr 'target')}#{start_param}#{end_param}>) - end + def convert_video node + start_param = (node.attr? 'start') ? %(&start=#{node.attr 'start'}) : '' + end_param = (node.attr? 'end') ? %(&end=#{node.attr 'end'}) : '' + result = [] + result << (node.title? ? %(.sp +.B #{manify node.title} +.br) : '.sp') + result << %(<#{node.media_uri(node.attr 'target')}#{start_param}#{end_param}> (video)) + result.join LF + end - def inline_anchor node - target = node.target - case node.type - when :link - if (text = node.text) == target - text = nil - else - text = text.gsub '"', %[#{ESC_BS}(dq] - end - if target.start_with? 'mailto:' - macro = 'MTO' - target = target[7..-1].sub '@', %[#{ESC_BS}(at] + def convert_inline_anchor node + target = node.target + case node.type + when :link + if target.start_with? 
'mailto:' + macro = 'MTO' + target = target.slice 7, target.length + else + macro = 'URL' + end + if (text = node.text) == target + text = '' + else + text = text.gsub '"', %[#{ESC_BS}(dq] + end + target = target.sub '@', %[#{ESC_BS}(at] if macro == 'MTO' + %(#{ESC_BS}c#{LF}#{ESC_FS}#{macro} "#{target}" "#{text}" ) + when :xref + unless (text = node.text) + refid = node.attributes['refid'] + if AbstractNode === (ref = (@refs ||= node.document.catalog[:refs])[refid]) + text = (ref.xreftext node.attr('xrefstyle', nil, true)) || %([#{refid}]) else - macro = 'URL' + text = %([#{refid}]) end - %(#{ESC_BS}c#{LF}#{ESC_FS}#{macro} "#{target}" "#{text}" ) - when :xref - refid = (node.attr 'refid') || target - node.text || (node.document.references[:ids][refid] || %([#{refid}])) - when :ref, :bibref - # These are anchor points, which shouldn't be visual - '' - else - warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) end + text + when :ref, :bibref + # These are anchor points, which shouldn't be visible + '' + else + logger.warn %(unknown anchor type: #{node.type.inspect}) + nil end + end - def inline_break node - %(#{node.text} -.br) - end + def convert_inline_break node + %(#{node.text}#{LF}#{ESC_FS}br) + end - def inline_button node - %(#{ESC_BS}fB[#{ESC_BS}0#{node.text}#{ESC_BS}0]#{ESC_BS}fP) - end + def convert_inline_button node + %(#{ESC_BS}fB[#{ESC_BS}0#{node.text}#{ESC_BS}0]#{ESC_BS}fP) + end - def inline_callout node - %(#{ESC_BS}fB(#{node.text})#{ESC_BS}fP) - end + def convert_inline_callout node + %(#{ESC_BS}fB(#{node.text})#{ESC_BS}fP) + end - # TODO supposedly groff has footnotes, but we're in search of an example - def inline_footnote node - if (index = node.attr 'index') - %([#{index}]) - elsif node.type == :xref - %([#{node.text}]) - end + # TODO supposedly groff has footnotes, but we're in search of an example + def convert_inline_footnote node + if (index = node.attr 'index') + %([#{index}]) + elsif node.type == :xref + %([#{node.text}]) end + end - def inline_image node - # NOTE alt should always be set - alt_text = (node.attr? 'alt') ? (node.attr 'alt') : node.target - (node.attr? 'link') ? %([#{alt_text}] <#{node.attr 'link'}>) : %([#{alt_text}]) - end + def convert_inline_image node + (node.attr? 'link') ? %([#{node.alt}] <#{node.attr 'link'}>) : %([#{node.alt}]) + end - def inline_indexterm node - node.type == :visible ? node.text : '' + def convert_inline_indexterm node + node.type == :visible ? node.text : '' + end + + def convert_inline_kbd node + if (keys = node.attr 'keys').size == 1 + keys[0] + else + keys.join %(#{ESC_BS}0+#{ESC_BS}0) end + end - def inline_kbd node - if (keys = node.attr 'keys').size == 1 - keys[0] - else - keys.join %(#{ESC_BS}0+#{ESC_BS}0) - end + def convert_inline_menu node + caret = %[#{ESC_BS}0#{ESC_BS}(fc#{ESC_BS}0] + menu = node.attr 'menu' + if !(submenus = node.attr 'submenus').empty? + submenu_path = submenus.map {|item| %(#{ESC_BS}fI#{item}#{ESC_BS}fP) }.join caret + %(#{ESC_BS}fI#{menu}#{ESC_BS}fP#{caret}#{submenu_path}#{caret}#{ESC_BS}fI#{node.attr 'menuitem'}#{ESC_BS}fP) + elsif (menuitem = node.attr 'menuitem') + %(#{ESC_BS}fI#{menu}#{caret}#{menuitem}#{ESC_BS}fP) + else + %(#{ESC_BS}fI#{menu}#{ESC_BS}fP) end + end - def inline_menu node - caret = %[#{ESC_BS}0#{ESC_BS}(fc#{ESC_BS}0] - menu = node.attr 'menu' - if !(submenus = node.attr 'submenus').empty? 
- submenu_path = submenus.map {|item| %(#{ESC_BS}fI#{item}#{ESC_BS}fP) }.join caret - %(#{ESC_BS}fI#{menu}#{ESC_BS}fP#{caret}#{submenu_path}#{caret}#{ESC_BS}fI#{node.attr 'menuitem'}#{ESC_BS}fP) - elsif (menuitem = node.attr 'menuitem') - %(#{ESC_BS}fI#{menu}#{caret}#{menuitem}#{ESC_BS}fP) - else - %(#{ESC_BS}fI#{menu}#{ESC_BS}fP) - end + # NOTE use fake element to prevent creating artificial word boundaries + def convert_inline_quoted node + case node.type + when :emphasis + %(#{ESC_BS}fI#{node.text}#{ESC_BS}fP) + when :strong + %(#{ESC_BS}fB#{node.text}#{ESC_BS}fP) + when :monospaced + %[#{ESC_BS}f(CR#{node.text}#{ESC_BS}fP] + when :single + %[#{ESC_BS}(oq#{node.text}#{ESC_BS}(cq] + when :double + %[#{ESC_BS}(lq#{node.text}#{ESC_BS}(rq] + else + node.text end + end - # NOTE use fake element to prevent creating artificial word boundaries - def inline_quoted node - case node.type - when :emphasis - %(#{ESC_BS}fI#{node.text}#{ESC_BS}fP) - when :strong - %(#{ESC_BS}fB#{node.text}#{ESC_BS}fP) - when :monospaced - %(#{ESC_BS}f[CR]#{node.text}#{ESC_BS}fP) - when :single - %[#{ESC_BS}(oq#{node.text}#{ESC_BS}(cq] - when :double - %[#{ESC_BS}(lq#{node.text}#{ESC_BS}(rq] - else - node.text + def self.write_alternate_pages mannames, manvolnum, target + if mannames && mannames.size > 1 + mannames.shift + manvolext = %(.#{manvolnum}) + dir, basename = ::File.split target + mannames.each do |manname| + ::File.write ::File.join(dir, %(#{manname}#{manvolext})), %(.so #{basename}), mode: FILE_WRITE_MODE end end + end - def resolve_content node - node.content_model == :compound ? node.content : %(.sp#{LF}#{manify node.content}) - end + private + + # Converts HTML entity references back to their original form, escapes + # special man characters and strips trailing whitespace. + # + # It's crucial that text only ever pass through manify once. + # + # str - the String to convert + # opts - an Hash of options to control processing (default: {}) + # * :whitespace an enum that indicates how to handle whitespace; supported options are: + # :preserve - preserve spaces (only expanding tabs); :normalize - normalize whitespace + # (remove spaces around newlines); :collapse - collapse adjacent whitespace to a single + # space (default: :collapse) + # * :append_newline a Boolean that indicates whether to append a newline to the result (default: false) + def manify str, opts = {} + case opts.fetch :whitespace, :collapse + when :preserve + str = str.gsub TAB, ET + when :normalize + str = str.gsub WrappedIndentRx, LF + else + str = str.tr_s WHITESPACE, ' ' + end + str = str. + gsub(LiteralBackslashRx, '\&(rs'). # literal backslash (not a troff escape sequence) + gsub(LeadingPeriodRx, '\\\&.'). # leading . is used in troff for macro call or other formatting; replace with \&. + # drop orphaned \c escape lines, unescape troff macro, quote adjacent character, isolate macro line + gsub(EscapedMacroRx) { (rest = $3.lstrip).empty? ? %(.#$1"#$2") : %(.#$1"#$2"#{LF}#{rest}) }. + gsub('-', '\-'). + gsub('<', '<'). + gsub('>', '>'). + gsub(' ', '\~'). # non-breaking space + gsub('©', '\(co'). # copyright sign + gsub('®', '\(rg'). # registered sign + gsub('™', '\(tm'). # trademark sign + gsub(' ', ' '). # thin space + gsub('–', '\(en'). # en dash + gsub(EmDashCharRefRx, '\(em'). # em dash + gsub('‘', '\(oq'). # left single quotation mark + gsub('’', '\(cq'). # right single quotation mark + gsub('“', '\(lq'). # left double quotation mark + gsub('”', '\(rq'). # right double quotation mark + gsub(EllipsisCharRefRx, '...'). 
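The :whitespace option documented above selects one of three treatments before the troff escaping is applied. Stripped of the escaping, the three modes behave roughly as follows (this assumes WrappedIndentRx matches blanks around a newline, per the constant defined earlier):

    str = "first line\n    wrapped continuation\tend"

    str.gsub "\t", ' ' * 8               # :preserve - only tabs are expanded
    str.gsub(/[ \t]*\n[ \t]*/, "\n")     # :normalize - drop blanks around newlines so wrapped lines rejoin cleanly
    str.tr_s "\n\t ", ' '                # :collapse - squeeze all whitespace runs to a single space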
# horizontal ellipsis + gsub('←', '\(<-'). # leftwards arrow + gsub('→', '\(->'). # rightwards arrow + gsub('⇐', '\(lA'). # leftwards double arrow + gsub('⇒', '\(rA'). # rightwards double arrow + gsub('​', '\:'). # zero width space + gsub('&','&'). # literal ampersand (NOTE must take place after any other replacement that includes &) + gsub('\'', '\(aq'). # apostrophe-quote + gsub(MockBoundaryRx, ''). # mock boundary + gsub(ESC_BS, '\\'). # unescape troff backslash (NOTE update if more escapes are added) + gsub(ESC_FS, '.'). # unescape full stop in troff commands (NOTE must take place after gsub(LeadingPeriodRx)) + rstrip # strip trailing space + opts[:append_newline] ? %(#{str}#{LF}) : str end + + def enclose_content node + node.content_model == :compound ? node.content : %(.sp#{LF}#{manify node.content, whitespace: :normalize}) + end +end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter/template.rb asciidoctor-2.0.10/lib/asciidoctor/converter/template.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter/template.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter/template.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,296 +1,268 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor - # A {Converter} implementation that uses templates composed in template - # languages supported by {https://github.com/rtomayko/tilt Tilt} to convert - # {AbstractNode} objects from a parsed AsciiDoc document tree to the backend - # format. - # - # The converter scans the provided directories for template files that are - # supported by Tilt. If an engine name (e.g., "slim") is specified in the - # options Hash passed to the constructor, the scan is limited to template - # files that have a matching extension (e.g., ".slim"). The scanner trims any - # extensions from the basename of the file and uses the resulting name as the - # key under which to store the template. When the {Converter#convert} method - # is invoked, the transform argument is used to select the template from this - # table and use it to convert the node. - # - # For example, the template file "path/to/templates/paragraph.html.slim" will - # be registered as the "paragraph" transform. The template would then be used - # to convert a paragraph {Block} object from the parsed AsciiDoc tree to an - # HTML backend format (e.g., "html5"). - # - # As an optimization, scan results and templates are cached for the lifetime - # of the Ruby process. If the {https://rubygems.org/gems/thread_safe - # thread_safe} gem is installed, these caches are guaranteed to be thread - # safe. If this gem is not present, they are not and a warning is issued. - class Converter::TemplateConverter < Converter::Base - DEFAULT_ENGINE_OPTIONS = { - :erb => { :trim => '<' }, - # TODO line 466 of haml/compiler.rb sorts the attributes; file an issue to make this configurable - # NOTE AsciiDoc syntax expects HTML/XML output to use double quotes around attribute values - :haml => { :format => :xhtml, :attr_wrapper => '"', :ugly => true, :escape_attrs => false }, - :slim => { :disable_escape => true, :sort_attrs => false, :pretty => false } - } - - # QUESTION are we handling how we load the thread_safe support correctly? - begin - require 'thread_safe' unless defined? ::ThreadSafe - @caches = { :scans => ::ThreadSafe::Cache.new, :templates => ::ThreadSafe::Cache.new } - rescue ::LoadError - @caches = { :scans => {}, :templates => {} } - # FIXME perhaps only warn if the cache option is enabled (meaning not disabled)? 
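write_alternate_pages, added to the manpage converter above, writes a stub page for each additional man name; each stub contains a single troff .so request that sources the primary page. For example (the paths are hypothetical and the target directory must already exist):

    Asciidoctor::Converter::ManPageConverter.write_alternate_pages %w(grep egrep fgrep), '1', 'man/man1/grep.1'
    # creates man/man1/egrep.1 and man/man1/fgrep.1, each containing: .so grep.1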
- warn 'asciidoctor: WARNING: gem \'thread_safe\' is not installed. This gem is recommended when using custom backend templates.' - end +# A {Converter} implementation that uses templates composed in template +# languages supported by {https://github.com/rtomayko/tilt Tilt} to convert +# {AbstractNode} objects from a parsed AsciiDoc document tree to the backend +# format. +# +# The converter scans the specified directories for template files that are +# supported by Tilt. If an engine name (e.g., "slim") is specified in the +# options Hash passed to the constructor, the scan is restricted to template +# files that have a matching extension (e.g., ".slim"). The scanner trims any +# extensions from the basename of the file and uses the resulting name as the +# key under which to store the template. When the {Converter#convert} method +# is invoked, the transform argument is used to select the template from this +# table and use it to convert the node. +# +# For example, the template file "path/to/templates/paragraph.html.slim" will +# be registered as the "paragraph" transform. The template is then used to +# convert a paragraph {Block} object from the parsed AsciiDoc tree to an HTML +# backend format (e.g., "html5"). +# +# As an optimization, scan results and templates are cached for the lifetime +# of the Ruby process. If the {https://rubygems.org/gems/concurrent-ruby +# concurrent-ruby} gem is installed, these caches are guaranteed to be thread +# safe. If this gem is not present, there is no such guarantee and a warning +# will be issued. +class Converter::TemplateConverter < Converter::Base + DEFAULT_ENGINE_OPTIONS = { + erb: { trim: 0 }, + # TODO line 466 of haml/compiler.rb sorts the attributes; file an issue to make this configurable + # NOTE AsciiDoc syntax expects HTML/XML output to use double quotes around attribute values + haml: { format: :xhtml, attr_wrapper: '"', escape_attrs: false, ugly: true }, + slim: { disable_escape: true, sort_attrs: false, pretty: false }, + } + + begin + require 'concurrent/map' unless defined? ::Concurrent::Map + @caches = { scans: ::Concurrent::Map.new, templates: ::Concurrent::Map.new } + rescue ::LoadError + @caches = { scans: {}, templates: {} } + end - def self.caches - @caches - end + def self.caches + @caches + end - def self.clear_caches - @caches[:scans].clear if @caches[:scans] - @caches[:templates].clear if @caches[:templates] - end + def self.clear_caches + @caches[:scans].clear if @caches[:scans] + @caches[:templates].clear if @caches[:templates] + end - def initialize backend, template_dirs, opts = {} - Helpers.require_library 'tilt' unless defined? ::Tilt - @backend = backend - @templates = {} - @template_dirs = template_dirs - @eruby = opts[:eruby] - @safe = opts[:safe] - @engine = opts[:template_engine] - @engine_options = DEFAULT_ENGINE_OPTIONS.inject({}) do |accum, (engine, default_opts)| - accum[engine] = default_opts.dup - accum - end - if opts[:htmlsyntax] == 'html' - @engine_options[:haml][:format] = :html5 - @engine_options[:slim][:format] = :html - end - if (overrides = opts[:template_engine_options]) - overrides.each do |engine, override_opts| - (@engine_options[engine] ||= {}).update override_opts - end - end - case opts[:template_cache] - when true - @caches = self.class.caches - when ::Hash - @caches = opts[:template_cache] - else - @caches = {} # the empty Hash effectively disables caching - end - scan - #create_handlers + def initialize backend, template_dirs, opts = {} + Helpers.require_library 'tilt' unless defined? 
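Per the class documentation above, a file such as path/to/templates/paragraph.html.slim is picked up as the "paragraph" transform. From the top-level API the converter is activated by passing template_dirs; the directory and template here are hypothetical:

    require 'asciidoctor'

    html = Asciidoctor.convert 'just a paragraph', safe: :safe, template_dirs: ['path/to/templates']
    # nodes with a matching template (here: paragraph) are rendered by that template;
    # everything else falls back to the built-in converter for the backend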
::Tilt.new + @backend = backend + @templates = {} + @template_dirs = template_dirs + @eruby = opts[:eruby] + @safe = opts[:safe] + @active_engines = {} + @engine = opts[:template_engine] + @engine_options = {}.tap {|accum| DEFAULT_ENGINE_OPTIONS.each {|engine, engine_opts| accum[engine] = engine_opts.merge } } + if opts[:htmlsyntax] == 'html' # if not set, assume xml since this converter is also used for DocBook (which doesn't specify htmlsyntax) + @engine_options[:haml][:format] = :html5 + @engine_options[:slim][:format] = :html + end + @engine_options[:slim][:include_dirs] = template_dirs.reverse.map {|dir| ::File.expand_path dir } + if (overrides = opts[:template_engine_options]) + overrides.each do |engine, override_opts| + (@engine_options[engine] ||= {}).update override_opts + end + end + case opts[:template_cache] + when true + logger.warn 'optional gem \'concurrent-ruby\' is not available. This gem is recommended when using the default template cache.' unless defined? ::Concurrent::Map + @caches = self.class.caches + when ::Hash + @caches = opts[:template_cache] + else + @caches = {} # the empty Hash effectively disables caching end + scan + end -=begin - # Public: Called when this converter is added to a composite converter. - def composed parent - # TODO set the backend info determined during the scan + # Public: Convert an {AbstractNode} to the backend format using the named template. + # + # Looks for a template that matches the value of the template name or, if the template name is not specified, the + # value of the {AbstractNode#node_name} property. + # + # node - the AbstractNode to convert + # template_name - the String name of the template to use, or the value of + # the node_name property on the node if a template name is + # not specified. (optional, default: nil) + # opts - an optional Hash that is passed as local variables to the + # template. (optional, default: nil) + # + # Returns the [String] result from rendering the template + def convert node, template_name = nil, opts = nil + unless (template = @templates[template_name ||= node.node_name]) + raise %(Could not find a custom template to handle transform: #{template_name}) end -=end - # Internal: Scans the template directories specified in the constructor for Tilt-supported - # templates, loads the templates and stores the in a Hash that is accessible via the - # {TemplateConverter#templates} method. - # - # Returns nothing - def scan - path_resolver = PathResolver.new - backend = @backend - engine = @engine - @template_dirs.each do |template_dir| - # FIXME need to think about safe mode restrictions here - next unless ::File.directory?(template_dir = (path_resolver.system_path template_dir, nil)) - - # NOTE last matching template wins for template name if no engine is given - file_pattern = '*' - if engine - file_pattern = %(*.#{engine}) - # example: templates/haml - if ::File.directory?(engine_dir = (::File.join template_dir, engine)) - template_dir = engine_dir - end - end + # Slim doesn't include helpers in the template's execution scope (like HAML), so do it ourselves + node.extend ::Slim::Helpers if (defined? 
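The constructor above takes the backend, the template directories, and an options Hash; opts[:template_cache] may be true (use the shared, process-wide caches), a Hash (bring your own cache), or anything else to disable caching. A direct construction sketch with illustrative values (requires the tilt gem):

    require 'asciidoctor'

    converter = Asciidoctor::Converter::TemplateConverter.new 'html5', ['my_templates'],
      template_engine: 'slim',                              # only scan *.slim files
      template_engine_options: { slim: { pretty: true } },  # merged over the defaults above
      template_cache: false                                 # any non-true, non-Hash value disables caching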
::Slim::Helpers) && (::Slim::Template === template) + + # NOTE opts become locals in the template + if template_name == 'document' + (template.render node, opts).strip + else + (template.render node, opts).rstrip + end + end - # example: templates/html5 or templates/haml/html5 - if ::File.directory?(backend_dir = (::File.join template_dir, backend)) - template_dir = backend_dir - end + # Public: Checks whether there is a Tilt template registered with the specified name. + # + # name - the String template name + # + # Returns a [Boolean] that indicates whether a Tilt template is registered for the + # specified template name. + def handles? name + @templates.key? name + end - pattern = ::File.join template_dir, file_pattern + # Public: Retrieves the templates that this converter manages. + # + # Returns a [Hash] of Tilt template objects keyed by template name. + def templates + @templates.merge + end - if (scan_cache = @caches[:scans]) - template_cache = @caches[:templates] - unless (templates = scan_cache[pattern]) - templates = (scan_cache[pattern] = (scan_dir template_dir, pattern, template_cache)) - end - templates.each do |name, template| - @templates[name] = template_cache[template.file] = template - end - else - @templates.update scan_dir(template_dir, pattern, @caches[:templates]) - end - nil - end + # Public: Registers a Tilt template with this converter. + # + # name - the String template name + # template - the Tilt template object to register + # + # Returns the Tilt template object + def register name, template + @templates[name] = if (template_cache = @caches[:templates]) + template_cache[template.file] = template + else + template end + #create_handler name, template + end -=begin - # Internal: Creates convert methods (e.g., inline_anchor) that delegate to the discovered templates. - # - # Returns nothing - def create_handlers - @templates.each do |name, template| - create_handler name, template - end - nil - end + private - # Internal: Creates a convert method for the specified name that delegates to the specified template. - # - # Returns nothing - def create_handler name, template - metaclass = class << self; self; end - if name == 'document' - metaclass.send :define_method, name do |node| - (template.render node).strip + # Internal: Scans the template directories specified in the constructor for Tilt-supported + # templates, loads the templates and stores the in a Hash that is accessible via the + # {TemplateConverter#templates} method. + # + # Returns nothing + def scan + path_resolver = PathResolver.new + backend = @backend + engine = @engine + @template_dirs.each do |template_dir| + # FIXME need to think about safe mode restrictions here + next unless ::File.directory?(template_dir = (path_resolver.system_path template_dir)) + + if engine + file_pattern = %(*.#{engine}) + # example: templates/haml + if ::File.directory?(engine_dir = %(#{template_dir}/#{engine})) + template_dir = engine_dir end else - metaclass.send :define_method, name do |node| - (template.render node).chomp - end - end - end -=end - - # Public: Convert an {AbstractNode} to the backend format using the named template. - # - # Looks for a template that matches the value of the - # {AbstractNode#node_name} property if a template name is not specified. - # - # node - the AbstractNode to convert - # template_name - the String name of the template to use, or the value of - # the node_name property on the node if a template name is - # not specified. 
(optional, default: nil) - # opts - an optional Hash that is passed as local variables to the - # template. (optional, default: {}) - # - # Returns the [String] result from rendering the template - def convert node, template_name = nil, opts = {} - template_name ||= node.node_name - unless (template = @templates[template_name]) - raise %(Could not find a custom template to handle transform: #{template_name}) + # NOTE last matching template wins for template name if no engine is given + file_pattern = '*' end - # Slim doesn't include helpers in the template's execution scope (like HAML), so do it ourselves - node.extend ::Slim::Helpers if (defined? ::Slim::Helpers) && (::Slim::Template === template) - - # NOTE opts become locals in the template - if template_name == 'document' - (template.render node, opts).strip - else - (template.render node, opts).chomp + # example: templates/html5 (engine not set) or templates/haml/html5 (engine set) + if ::File.directory?(backend_dir = %(#{template_dir}/#{backend})) + template_dir = backend_dir end - end - # Public: Checks whether there is a Tilt template registered with the specified name. - # - # name - the String template name - # - # Returns a [Boolean] that indicates whether a Tilt template is registered for the - # specified template name. - def handles? name - @templates.key? name - end - - # Public: Retrieves the templates that this converter manages. - # - # Returns a [Hash] of Tilt template objects keyed by template name. - def templates - @templates.dup.freeze - end + pattern = %(#{template_dir}/#{file_pattern}) - # Public: Registers a Tilt template with this converter. - # - # name - the String template name - # template - the Tilt template object to register - # - # Returns the Tilt template object - def register name, template - @templates[name] = if (template_cache = @caches[:templates]) - template_cache[template.file] = template + if (scan_cache = @caches[:scans]) + template_cache = @caches[:templates] + unless (templates = scan_cache[pattern]) + templates = scan_cache[pattern] = scan_dir template_dir, pattern, template_cache + end + templates.each do |name, template| + @templates[name] = template_cache[template.file] = template + end else - template + @templates.update scan_dir(template_dir, pattern, @caches[:templates]) end - #create_handler name, template + nil end + end - # Internal: Scan the specified directory for template files matching pattern and instantiate - # a Tilt template for each matched file. - # - # Returns the scan result as a [Hash] - def scan_dir template_dir, pattern, template_cache = nil - result = {} - eruby_loaded = nil - # Grab the files in the top level of the directory (do not recurse) - ::Dir.glob(pattern).select {|match| ::File.file? match }.each do |file| - if (basename = ::File.basename file) == 'helpers.rb' || (path_segments = basename.split '.').size < 2 - next - end - # TODO we could derive the basebackend from the minor extension of the template file - #name, *rest, ext_name = *path_segments # this form only works in Ruby >= 1.9 - name = path_segments[0] - if name == 'block_ruler' - name = 'thematic_break' - elsif name.start_with? 'block_' - name = name[6..-1] - end - - template_class = ::Tilt - extra_engine_options = {} - case (ext_name = path_segments[-1]) - when 'slim' - # slim doesn't get loaded by Tilt, so we have to load it explicitly - Helpers.require_library 'slim' unless defined? 
::Slim - # align safe mode of AsciiDoc embedded in Slim template with safe mode of current document - (@engine_options[:slim][:asciidoc] ||= {})[:safe] ||= @safe if @safe && ::Slim::VERSION >= '3.0' - # load include plugin when using Slim >= 2.1 - require 'slim/include' unless (defined? ::Slim::Include) || ::Slim::VERSION < '2.1' - when 'erb' - template_class, extra_engine_options = (eruby_loaded ||= load_eruby(@eruby)) - when 'rb' + # Internal: Scan the specified directory for template files matching pattern and instantiate + # a Tilt template for each matched file. + # + # Returns the scan result as a [Hash] + def scan_dir template_dir, pattern, template_cache = nil + result, helpers = {}, nil + # Grab the files in the top level of the directory (do not recurse) + ::Dir.glob(pattern).select {|match| ::File.file? match }.each do |file| + if (basename = ::File.basename file) == 'helpers.rb' + helpers = file + next + elsif (path_segments = basename.split '.').size < 2 + next + end + if (name = path_segments[0]) == 'block_ruler' + name = 'thematic_break' + elsif name.start_with? 'block_' + name = name.slice 6, name.length + end + unless template_cache && (template = template_cache[file]) + template_class, extra_engine_options, extsym = ::Tilt, {}, path_segments[-1].to_sym + case extsym + when :slim + unless @active_engines[extsym] + # NOTE slim doesn't get automatically loaded by Tilt + Helpers.require_library 'slim' unless defined? ::Slim::Engine + require 'slim/include' unless defined? ::Slim::Include + ::Slim::Engine.define_options asciidoc: {} + # align safe mode of AsciiDoc embedded in Slim template with safe mode of current document + # NOTE safe mode won't get updated if using template cache and changing safe mode + (@engine_options[extsym][:asciidoc] ||= {})[:safe] ||= @safe if @safe + @active_engines[extsym] = true + end + when :haml + unless @active_engines[extsym] + Helpers.require_library 'haml' unless defined? ::Haml::Engine + # NOTE Haml 5 dropped support for pretty printing + @engine_options[extsym].delete :ugly if defined? ::Haml::TempleEngine + @active_engines[extsym] = true + end + when :erb + template_class, extra_engine_options = (@active_engines[extsym] ||= (load_eruby @eruby)) + when :rb next else - next unless ::Tilt.registered? ext_name + next unless ::Tilt.registered? extsym.to_s end - unless template_cache && (template = template_cache[file]) - template = template_class.new file, 1, (@engine_options[ext_name.to_sym] || {}).merge(extra_engine_options) - end - result[name] = template - end - if ::File.file?(helpers = (::File.join template_dir, 'helpers.rb')) - require helpers + template = template_class.new file, 1, (@engine_options[extsym] ||= {}).merge(extra_engine_options) end - result + result[name] = template + end + if helpers || ::File.file?(helpers = %(#{template_dir}/helpers.rb)) + require helpers end + result + end - # Internal: Load the eRuby implementation - # - # name - the String name of the eRuby implementation - # - # Returns an [Array] containing the Tilt template Class for the eRuby implementation - # and a Hash of additional options to pass to the initializer - def load_eruby name - if !name || name == 'erb' - require 'erb' unless defined? ::ERB - [::Tilt::ERBTemplate, {}] - elsif name == 'erubis' - Helpers.require_library 'erubis' unless defined? 
::Erubis::FastEruby - [::Tilt::ErubisTemplate, { :engine_class => ::Erubis::FastEruby }] - else - raise ::ArgumentError, %(Unknown ERB implementation: #{name}) - end + # Internal: Load the eRuby implementation + # + # name - the String name of the eRuby implementation + # + # Returns an [Array] containing the Tilt template Class for the eRuby implementation + # and a Hash of additional options to pass to the initializer + def load_eruby name + if !name || name == 'erb' + require 'erb' unless defined? ::ERB.version + [::Tilt::ERBTemplate, {}] + elsif name == 'erubis' + Helpers.require_library 'erubis' unless defined? ::Erubis::FastEruby + [::Tilt::ErubisTemplate, { engine_class: ::Erubis::FastEruby }] + else + raise ::ArgumentError, %(Unknown ERB implementation: #{name}) end end end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/converter.rb asciidoctor-2.0.10/lib/asciidoctor/converter.rb --- asciidoctor-1.5.5/lib/asciidoctor/converter.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/converter.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,222 +1,414 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor - # A base module for defining converters that can be used to convert {AbstractNode} - # objects in a parsed AsciiDoc document to a backend format such as HTML or - # DocBook. - # - # Implementing a converter involves: - # - # * including this module in a {Converter} implementation class - # * overriding the {Converter#convert} method - # * optionally associating the converter with one or more backends using - # the {#register_for} DSL method imported by the {Config Converter::Config} module +# A module for defining converters that are used to convert {AbstractNode} objects in a parsed AsciiDoc document to an +# output (aka backend) format such as HTML or DocBook. +# +# A {Converter} is typically instantiated each time an AsciiDoc document is processed (i.e., parsed and converted). +# Implementing a custom converter entails: +# +# * Including the {Converter} module in a converter class and implementing the {Converter#convert} method or extending +# the {Converter::Base Base} class and implementing the dispatch methods that map to each node. +# * Optionally registering the converter with one or more backend names statically using the +register_for+ DSL method +# contributed by the {Converter::Config Config} module. +# +# Examples +# +# class TextConverter +# include Asciidoctor::Converter +# register_for 'text' +# def initialize *args +# super +# outfilesuffix '.txt' +# end +# def convert node, transform = node.node_name, opts = nil +# case transform +# when 'document', 'section' +# [node.title, node.content].join %(\n\n) +# when 'paragraph' +# (node.content.tr ?\n, ' ') << ?\n +# else +# (transform.start_with? 'inline_') ? node.text : node.content +# end +# end +# end +# puts Asciidoctor.convert_file 'sample.adoc', backend: :text, safe: :safe +# +# class Html5Converter < (Asciidoctor::Converter.for 'html5') +# register_for 'html5' +# def convert_paragraph node +# %(
+#       <p>#{node.content}</p>
    ) +# end +# end +# puts Asciidoctor.convert_file 'sample.adoc', safe: :safe +module Converter + autoload :CompositeConverter, %(#{__dir__}/converter/composite) + autoload :TemplateConverter, %(#{__dir__}/converter/template) + + # Public: The String backend name that this converter is handling. + attr_reader :backend + + # Public: Creates a new instance of this {Converter}. + # + # backend - The String backend name (aka format) to which this converter converts. + # opts - An options Hash (optional, default: {}) + # + # Returns a new [Converter] instance. + def initialize backend, opts = {} + @backend = backend + end + + # Public: Converts an {AbstractNode} using the given transform. + # + # This method must be implemented by a concrete converter class. + # + # node - The concrete instance of AbstractNode to convert. + # transform - An optional String transform that hints at which transformation should be applied to this node. If a + # transform is not given, the transform is often derived from the value of the {AbstractNode#node_name} + # property. (optional, default: nil) + # opts - An optional Hash of options hints about how to convert the node. (optional, default: nil) + # + # Returns the [String] result. + def convert node, transform = nil, opts = nil + raise ::NotImplementedError, %(#{self.class} (backend: #{@backend}) must implement the ##{__method__} method) + end + + # Public: Reports whether the current converter is able to convert this node (by its transform name). Used by the + # {CompositeConverter} to select which converter to use to handle a given node. Returns true by default. + # + # transform - the String name of the node transformation (typically the node name). + # + # Returns a [Boolean] indicating whether this converter can handle the specified transform. + def handles? transform + true + end + + # Public: Derive backend traits (basebackend, filetype, outfilesuffix, htmlsyntax) from the given backend. + # + # backend - the String backend from which to derive the traits + # + # Returns the backend traits for the given backend as a [Hash]. + def self.derive_backend_traits backend + return {} unless backend + if (t_outfilesuffix = DEFAULT_EXTENSIONS[(t_basebackend = backend.sub TrailingDigitsRx, '')]) + t_filetype = t_outfilesuffix.slice 1, t_outfilesuffix.length + else + t_outfilesuffix = %(.#{t_filetype = t_basebackend}) + end + t_filetype == 'html' ? + { basebackend: t_basebackend, filetype: t_filetype, htmlsyntax: 'html', outfilesuffix: t_outfilesuffix } : + { basebackend: t_basebackend, filetype: t_filetype, outfilesuffix: t_outfilesuffix } + end + + module BackendTraits + def basebackend value = nil + value ? (backend_traits[:basebackend] = value) : backend_traits[:basebackend] + end + + def filetype value = nil + value ? (backend_traits[:filetype] = value) : backend_traits[:filetype] + end + + def htmlsyntax value = nil + value ? (backend_traits[:htmlsyntax] = value) : backend_traits[:htmlsyntax] + end + + def outfilesuffix value = nil + value ? (backend_traits[:outfilesuffix] = value) : backend_traits[:outfilesuffix] + end + + def supports_templates value = true + backend_traits[:supports_templates] = value + end + + def supports_templates? + backend_traits[:supports_templates] + end + + def init_backend_traits value = nil + @backend_traits = value || {} + end + + def backend_traits + @backend_traits ||= Converter.derive_backend_traits @backend + end + + alias backend_info backend_traits + + # Deprecated: Use {Converter.derive_backend_traits} instead. 
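# NOTE a minimal, hedged sketch of the Converter::Base workflow documented above:
# register_for associates the class with a backend name, the BackendTraits accessors
# declare traits in the constructor, and Base#convert dispatches to convert_<node_name>
# handlers. The 'mytext' backend name and the handlers below are illustrative
# assumptions, not code from this diff.
require 'asciidoctor'

class MyTextConverter < Asciidoctor::Converter::Base
  register_for 'mytext'       # hypothetical backend name
  def initialize *args
    super
    outfilesuffix '.txt'      # BackendTraits accessor mixed in via Converter
  end

  def convert_embedded node   # used when converting without header and footer
    node.content
  end

  def convert_document node   # used when writing a standalone document to a file
    [node.title, node.content].join %(\n\n)
  end

  def convert_paragraph node
    node.content
  end
end

puts Asciidoctor.convert 'Hello, AsciiDoc.', backend: 'mytext', safe: :safe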
+ def self.derive_backend_traits backend + Converter.derive_backend_traits backend + end + end + + # A module that contributes the +register_for+ method for registering a converter with the default registry. + module Config + # Public: Registers this {Converter} class with the default registry to handle the specified backend name(s). + # + # backends - One or more String backend names with which to associate this {Converter} class. + # + # Returns nothing. + def register_for *backends + Converter.register self, *(backends.map {|backend| backend.to_s }) + end + end + + # A reusable module for registering and instantiating {Converter Converter} classes used to convert an {AbstractNode} + # to an output (aka backend) format such as HTML or DocBook. + # + # {Converter Converter} objects are instantiated by passing a String backend name and, optionally, an options Hash to + # the {Factory#create} method. The backend can be thought of as an intent to convert a document to a specified format. + # + # Applications interact with the factory either through the global, static registry mixed into the {Converter + # Converter} module or a concrete class that includes this module such as {CustomFactory}. For example: # # Examples # - # class TextConverter - # include Asciidoctor::Converter - # register_for 'text' - # def initialize backend, opts - # super - # outfilesuffix '.txt' - # end - # def convert node, transform = nil - # case (transform ||= node.node_name) - # when 'document' - # node.content - # when 'section' - # [node.title, node.content] * "\n\n" - # when 'paragraph' - # node.content.tr("\n", ' ') << "\n" - # else - # if transform.start_with? 'inline_' - # node.text - # else - # %(<#{transform}>\n) - # end - # end - # end - # end - # - # puts Asciidoctor.convert_file 'sample.adoc', backend: :text - module Converter - # A module that provides the {#register_for} method for statically - # registering a converter with the default {Factory Converter::Factory} instance. - module Config - # Public: Statically registers the current {Converter} class with the default - # {Factory Converter::Factory} to handle conversion to the specified backends. - # - # This method also defines the converts? method on the class which returns whether - # the class is registered to convert a specified backend. - # - # backends - A String Array of backends with which to associate this {Converter} class. - # - # Returns nothing - def register_for *backends - Factory.register self, backends - metaclass = class << self; self; end - if backends == ['*'] - metaclass.send :define_method, :converts? do |name| - true - end + # converter = Asciidoctor::Converter.create 'html5', htmlsyntax: 'xml' + module Factory + # Public: Create an instance of DefaultProxyFactory or CustomFactory, depending on whether the proxy_default keyword + # arg is set (true by default), and optionally seed it with the specified converters map. If proxy_default is set, + # entries in the proxy registry are preferred over matching entries from the default registry. + # + # converters - An optional Hash of converters to use in place of ones in the default registry. The keys are + # backend names and the values are converter classes or instances. + # proxy_default - A Boolean keyword arg indicating whether to proxy the default registry (optional, default: true). + # + # Returns a Factory instance (DefaultFactoryProxy or CustomFactory) seeded with the optional converters map. + def self.new converters = nil, proxy_default: true + proxy_default ? 
(DefaultFactoryProxy.new converters) : (CustomFactory.new converters) + end + + # Deprecated: Maps the old default factory instance holder to the Converter module. + def self.default *args + Converter + end + + # Deprecated: Maps the create method on the old default factory instance holder to the Converter module. + def self.create backend, opts = {} + default.create backend, opts + end + + # Public: Register a custom converter with this factory to handle conversion for the specified backends. If the + # backend is an asterisk (i.e., +*+), the converter will handle any backend for which a converter is not registered. + # + # converter - The Converter class to register. + # backends - One or more String backend names that this converter should be registered to handle. + # + # Returns nothing + def register converter, *backends + backends.each {|backend| backend == '*' ? (registry.default = converter) : (registry[backend] = converter) } + end + + # Public: Lookup the custom converter registered with this factory to handle the specified backend. + # + # backend - The String backend name. + # + # Returns the [Converter] class registered to convert the specified backend or nil if no match is found. + def for backend + registry[backend] + end + + # Public: Create a new Converter object that can be used to convert {AbstractNode}s to the format associated with + # the backend. This method accepts an optional Hash of options that are passed to the converter's constructor. + # + # If a custom Converter is found to convert the specified backend, it's instantiated (if necessary) and returned + # immediately. If a custom Converter is not found, an attempt is made to find a built-in converter. If the + # +:template_dirs+ key is found in the Hash passed as the second argument, a {CompositeConverter} is created that + # delegates to a {TemplateConverter} and, if found, the built-in converter. If the +:template_dirs+ key is not + # found, the built-in converter is returned or nil if no converter is found. + # + # backend - the String backend name. + # opts - a Hash of options to customize creation; also passed to the converter's constructor: + # :template_dirs - a String Array of directories used to instantiate a {TemplateConverter} (optional). + # :delegate_backend - a backend String of the last converter in the {CompositeConverter} chain (optional). + # + # Returns the [Converter] instance. + def create backend, opts = {} + if (converter = self.for backend) + converter = converter.new backend, opts if ::Class === converter + if (template_dirs = opts[:template_dirs]) && BackendTraits === converter && converter.supports_templates? + CompositeConverter.new backend, (TemplateConverter.new backend, template_dirs, opts), converter, backend_traits_source: converter + else + converter + end + elsif (template_dirs = opts[:template_dirs]) + if (delegate_backend = opts[:delegate_backend]) && (converter = self.for delegate_backend) + converter = converter.new delegate_backend, opts if ::Class === converter + CompositeConverter.new backend, (TemplateConverter.new backend, template_dirs, opts), converter, backend_traits_source: converter else - metaclass.send :define_method, :converts? do |name| - backends.include? name - end + TemplateConverter.new backend, template_dirs, opts end - nil end end - module BackendInfo - def backend_info - @backend_info ||= setup_backend_info - end + # Public: Get the Hash of Converter classes keyed by backend name. Intended for testing only. 
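# NOTE a hedged usage sketch of the factory API described above; the template directory
# path is hypothetical and the tilt gem must be available for the :template_dirs branch.
require 'asciidoctor'

Asciidoctor::Converter.for 'html5'    # => the built-in HTML 5 converter class

# options are passed through to the converter's constructor
converter = Asciidoctor::Converter.create 'html5', htmlsyntax: 'xml'

# when :template_dirs is given and the resolved converter declares template support,
# the factory combines a TemplateConverter with it in a CompositeConverter
converter = Asciidoctor::Converter.create 'html5', template_dirs: ['./my-templates']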
+ def converters + registry.merge + end - def setup_backend_info - raise ::ArgumentError, %(Cannot determine backend for converter: #{self.class}) unless @backend - base = @backend.sub TrailingDigitsRx, '' - if (ext = DEFAULT_EXTENSIONS[base]) - type = ext[1..-1] - else - # QUESTION should we be forcing the basebackend to html if unknown? - base = 'html' - ext = '.html' - type = 'html' - syntax = 'html' - end - { - 'basebackend' => base, - 'outfilesuffix' => ext, - 'filetype' => type, - 'htmlsyntax' => syntax - } + private + + def registry + raise ::NotImplementedError, %(#{Factory} subclass #{self.class} must implement the ##{__method__} method) + end + end + + class CustomFactory + include Factory + + def initialize seed_registry = nil + if seed_registry + seed_registry.default = seed_registry.delete '*' + @registry = seed_registry + else + @registry = {} end + end - def filetype value = nil - if value - backend_info['filetype'] = value + # Public: Unregister all Converter classes that are registered with this factory. Intended for testing only. + # + # Returns nothing. + def unregister_all + registry.clear.default = nil + end + + private + + attr_reader :registry + end + + # Mixed into the {Converter} module to provide the global registry of converters that are registered statically. + # + # This registry includes built-in converters for {Html5Converter HTML 5}, {DocBook5Converter DocBook 5} and + # {ManPageConverter man(ual) page}, as well as any custom converters that have been discovered or explicitly + # registered. Converter registration is synchronized (where applicable) and is thus guaranteed to be thread safe. + module DefaultFactory + include Factory + + private + + @@registry = {} + + def registry + @@registry + end + + unless RUBY_ENGINE == 'opal' # the following block adds support for synchronization and lazy registration + public + + def register converter, *backends + if @@mutex.owned? + backends.each {|backend| backend == '*' ? (@@catch_all = converter) : (@@registry = @@registry.merge backend => converter) } else - backend_info['filetype'] + @@mutex.synchronize { register converter, *backends } end end - def basebackend value = nil - if value - backend_info['basebackend'] = value - else - backend_info['basebackend'] + def unregister_all + @@mutex.synchronize do + @@registry = @@registry.select {|backend| PROVIDED[backend] } + @@catch_all = nil end end - def outfilesuffix value = nil - if value - backend_info['outfilesuffix'] = value - else - backend_info['outfilesuffix'] + def for backend + @@registry.fetch backend do + PROVIDED[backend] ? (@@mutex.synchronize do + # require is thread-safe, so no reason to refetch + require PROVIDED[backend] + @@registry[backend] + end) : catch_all end end - def htmlsyntax value = nil - if value - backend_info['htmlsyntax'] = value - else - backend_info['htmlsyntax'] - end + PROVIDED = { + 'docbook5' => %(#{__dir__}/converter/docbook5), + 'html5' => %(#{__dir__}/converter/html5), + 'manpage' => %(#{__dir__}/converter/manpage), + } + + private + + def catch_all + @@catch_all end + + @@catch_all = nil + @@mutex = ::Mutex.new end + end + + class DefaultFactoryProxy < CustomFactory + include DefaultFactory # inserts module into ancestors immediately after superclass + + unless RUBY_ENGINE == 'opal' + def unregister_all + super + @registry.clear.default = nil + end - class << self - # Mixes the {Config Converter::Config} module into any class that includes the {Converter} module. 
- # - # converter - The Class that includes the {Converter} module - # - # Returns nothing - def included converter - converter.extend Config + def for backend + @registry.fetch(backend) { super } + end + + private + + def catch_all + @registry.default || super end end + end - include Config - include BackendInfo + # Internal: Mixes the {Config} module into any class that includes the {Converter} module. Additionally, mixes the + # {BackendTraits} method into instances of this class. + # + # into - The Class into which the {Converter} module is being included. + # + # Returns nothing. + private_class_method def self.included into + into.send :include, BackendTraits + into.extend Config + end || :included + + # An abstract base class for defining converters that can be used to convert {AbstractNode} objects in a parsed + # AsciiDoc document to a backend format such as HTML or DocBook. + class Base + include Converter, Logging - # Public: Creates a new instance of Converter + # Public: Converts an {AbstractNode} by delegating to a method that matches the transform value. # - # backend - The String backend format to which this converter converts. - # opts - An options Hash (optional, default: {}) + # This method looks for a method whose name matches the transform prefixed with "convert_" to dispatch to. If the + # +opts+ argument is non-nil, this method assumes the dispatch method accepts two arguments, the node and an options + # Hash. The options Hash may be used by converters to delegate back to the top-level converter. Currently, this + # feature is used for the outline transform. If the +opts+ argument is nil, this method assumes the dispatch method + # accepts the node as its only argument. # - # Returns a new instance of [Converter] - def initialize backend, opts = {} - @backend = backend - setup_backend_info + # See {Converter#convert} for details about the arguments and return value. + def convert node, transform = node.node_name, opts = nil + opts ? (send 'convert_' + transform, node, opts) : (send 'convert_' + transform, node) + rescue + raise unless ::NoMethodError === (ex = $!) && ex.receiver == self && ex.name.to_s == transform + logger.warn %(missing convert handler for #{ex.name} node in #{@backend} backend (#{self.class})) + nil end -=begin - # Public: Invoked when this converter is added to the chain of converters in a {CompositeConverter}. - # - # owner - The CompositeConverter instance - # - # Returns nothing - def composed owner + def handles? transform + respond_to? %(convert_#{transform}) end -=end - # Public: Converts an {AbstractNode} using the specified transform along - # with additional options. If a transform is not specified, implementations - # typically derive one from the {AbstractNode#node_name} property. - # - # Implementations are free to decide how to carry out the conversion. In - # the case of the built-in converters, the tranform value is used to - # dispatch to a handler method. The {TemplateConverter} uses the value of - # the transform to select a template to render. - # - # node - The concrete instance of AbstractNode to convert - # transform - An optional String transform that hints at which transformation - # should be applied to this node. If a transform is not specified, - # the transform is typically derived from the value of the - # node's node_name property. (optional, default: nil) - # opts - An optional Hash of options that provide additional hints about - # how to convert the node. 
(optional, default: {}) + # Public: Converts the {AbstractNode} using only its converted content. # - # Returns the [String] result - def convert node, transform = nil, opts = {} - raise ::NotImplementedError + # Returns the converted [String] content. + def content_only node + node.content end - # Alias for backward compatibility. - alias :convert_with_options :convert - end - - # A module that can be used to mix the {#write} method into a {Converter} - # implementation to allow the converter to control how the output is written - # to disk. - module Writer - # Public: Writes the output to the specified target file name or stream. + # Public: Skips conversion of the {AbstractNode}. # - # output - The output String to write - # target - The String file name or stream object to which the output should - # be written. - # - # Returns nothing - def write output, target - if target.respond_to? :write - target.write output.chomp - # ensure there's a trailing endline to be nice to terminals - target.write EOL - else - ::File.open(target, 'w') {|f| f.write output } - end - nil - end + # Returns nothing. + def skip node; end end - module VoidWriter - include Writer - # Public: Does not write output - def write output, target - end - end + extend DefaultFactory # exports static methods +end end - -require 'asciidoctor/converter/base' -require 'asciidoctor/converter/factory' diff -Nru asciidoctor-1.5.5/lib/asciidoctor/convert.rb asciidoctor-2.0.10/lib/asciidoctor/convert.rb --- asciidoctor-1.5.5/lib/asciidoctor/convert.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/convert.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,193 @@ +module Asciidoctor + module_function + + # Public: Parse the AsciiDoc source input into an Asciidoctor::Document and + # convert it to the specified backend format. + # + # Accepts input as an IO (or StringIO), String or String Array object. If the + # input is a File, the object is expected to be opened for reading and is not + # closed afterwards by this method. Information about the file (filename, + # directory name, etc) gets assigned to attributes on the Document object. + # + # If the :to_file option is true, and the input is a File, the output is + # written to a file adjacent to the input file, having an extension that + # corresponds to the backend format. Otherwise, if the :to_file option is + # specified, the file is written to that file. If :to_file is not an absolute + # path, it is resolved relative to :to_dir, if given, otherwise the + # Document#base_dir. If the target directory does not exist, it will not be + # created unless the :mkdirs option is set to true. If the file cannot be + # written because the target directory does not exist, or because it falls + # outside of the Document#base_dir in safe mode, an IOError is raised. + # + # If the output is going to be written to a file, the header and footer are + # included unless specified otherwise (writing to a file implies creating a + # standalone document). Otherwise, the header and footer are not included by + # default and the converted result is returned. + # + # input - the String AsciiDoc source filename + # options - a String, Array or Hash of options to control processing (default: {}) + # String and Array values are converted into a Hash. + # See Asciidoctor::Document#initialize for details about options. 
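# NOTE a hedged sketch of how the conversion entry points documented in this file are
# typically invoked; the file names below are hypothetical.
require 'asciidoctor'

# return the converted String (header and footer omitted by default)
puts Asciidoctor.convert 'Hello, *AsciiDoc*!', safe: :safe

# write sample.html next to sample.adoc (writing to a file implies a standalone document)
Asciidoctor.convert_file 'sample.adoc', safe: :safe

# write into a separate output directory, creating it if necessary
Asciidoctor.convert_file 'sample.adoc', safe: :safe, to_dir: 'build', mkdirs: true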
+ # + # Returns the Document object if the converted String is written to a + # file, otherwise the converted String + def convert input, options = {} + (options = options.merge).delete :parse + to_dir = options.delete :to_dir + mkdirs = options.delete :mkdirs + + case (to_file = options.delete :to_file) + when true, nil + unless (write_to_target = to_dir) + sibling_path = ::File.absolute_path input.path if ::File === input + end + to_file = nil + when false + to_file = nil + when '/dev/null' + return load input, options + else + options[:to_file] = write_to_target = to_file unless (stream_output = to_file.respond_to? :write) + end + + unless options.key? :standalone + if sibling_path || write_to_target + options[:standalone] = options.fetch :header_footer, true + elsif options.key? :header_footer + options[:standalone] = options[:header_footer] + end + end + + # NOTE outfile may be controlled by document attributes, so resolve outfile after loading + if sibling_path + options[:to_dir] = outdir = ::File.dirname sibling_path + elsif write_to_target + if to_dir + if to_file + options[:to_dir] = ::File.dirname ::File.expand_path ::File.join to_dir, to_file + else + options[:to_dir] = ::File.expand_path to_dir + end + elsif to_file + options[:to_dir] = ::File.dirname ::File.expand_path to_file + end + end + + # NOTE :to_dir is always set when outputting to a file + # NOTE :to_file option only passed if assigned an explicit path + doc = load input, options + + if sibling_path # write to file in same directory + outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) + raise ::IOError, %(input file and output file cannot be the same: #{outfile}) if outfile == sibling_path + elsif write_to_target # write to explicit file or directory + working_dir = (options.key? :base_dir) ? (::File.expand_path options[:base_dir]) : ::Dir.pwd + # QUESTION should the jail be the working_dir or doc.base_dir??? + jail = doc.safe >= SafeMode::SAFE ? working_dir : nil + if to_dir + outdir = doc.normalize_system_path(to_dir, working_dir, jail, target_name: 'to_dir', recover: false) + if to_file + outfile = doc.normalize_system_path(to_file, outdir, nil, target_name: 'to_dir', recover: false) + # reestablish outdir as the final target directory (in the case to_file had directory segments) + outdir = ::File.dirname outfile + else + outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) + end + elsif to_file + outfile = doc.normalize_system_path(to_file, working_dir, jail, target_name: 'to_dir', recover: false) + # establish outdir as the final target directory (in the case to_file had directory segments) + outdir = ::File.dirname outfile + end + + if ::File === input && outfile == (::File.absolute_path input.path) + raise ::IOError, %(input file and output file cannot be the same: #{outfile}) + end + + if mkdirs + Helpers.mkdir_p outdir + else + # NOTE we intentionally refer to the directory as it was passed to the API + raise ::IOError, %(target directory does not exist: #{to_dir} (hint: set :mkdirs option)) unless ::File.directory? outdir + end + else # write to stream + outfile = to_file + outdir = nil + end + + if outfile && !stream_output + output = doc.convert 'outfile' => outfile, 'outdir' => outdir + else + output = doc.convert + end + + if outfile + doc.write output, outfile + + # NOTE document cannot control this behavior if safe >= SafeMode::SERVER + # NOTE skip if stylesdir is a URI + if !stream_output && doc.safe < SafeMode::SECURE && (doc.attr? 
'linkcss') && (doc.attr? 'copycss') && + (doc.basebackend? 'html') && !((stylesdir = (doc.attr 'stylesdir')) && (Helpers.uriish? stylesdir)) + if (stylesheet = doc.attr 'stylesheet') + if DEFAULT_STYLESHEET_KEYS.include? stylesheet + copy_asciidoctor_stylesheet = true + elsif !(Helpers.uriish? stylesheet) + copy_user_stylesheet = true + end + end + copy_syntax_hl_stylesheet = (syntax_hl = doc.syntax_highlighter) && (syntax_hl.write_stylesheet? doc) + if copy_asciidoctor_stylesheet || copy_user_stylesheet || copy_syntax_hl_stylesheet + stylesoutdir = doc.normalize_system_path(stylesdir, outdir, doc.safe >= SafeMode::SAFE ? outdir : nil) + if mkdirs + Helpers.mkdir_p stylesoutdir + else + raise ::IOError, %(target stylesheet directory does not exist: #{stylesoutdir} (hint: set :mkdirs option)) unless ::File.directory? stylesoutdir + end + + if copy_asciidoctor_stylesheet + Stylesheets.instance.write_primary_stylesheet stylesoutdir + # FIXME should Stylesheets also handle the user stylesheet? + elsif copy_user_stylesheet + if (stylesheet_src = doc.attr 'copycss').empty? + stylesheet_src = doc.normalize_system_path stylesheet + else + # NOTE in this case, copycss is a source location (but cannot be a URI) + stylesheet_src = doc.normalize_system_path stylesheet_src + end + stylesheet_dest = doc.normalize_system_path stylesheet, stylesoutdir, (doc.safe >= SafeMode::SAFE ? outdir : nil) + # NOTE don't warn if src can't be read and dest already exists (see #2323) + if stylesheet_src != stylesheet_dest && (stylesheet_data = doc.read_asset stylesheet_src, + warn_on_failure: !(::File.file? stylesheet_dest), label: 'stylesheet') + ::File.write stylesheet_dest, stylesheet_data, mode: FILE_WRITE_MODE + end + end + syntax_hl.write_stylesheet doc, stylesoutdir if copy_syntax_hl_stylesheet + end + end + doc + else + output + end + end + + # Public: Parse the contents of the AsciiDoc source file into an + # Asciidoctor::Document and convert it to the specified backend format. + # + # input - the String AsciiDoc source filename + # options - a String, Array or Hash of options to control processing (default: {}) + # String and Array values are converted into a Hash. + # See Asciidoctor::Document#initialize for details about options. + # + # Returns the Document object if the converted String is written to a + # file, otherwise the converted String + def convert_file filename, options = {} + ::File.open(filename, FILE_READ_MODE) {|file| convert file, options } + end + + # Deprecated: Use {Asciidoctor.convert} instead. + alias render convert + module_function :render + + # Deprecated: Use {Asciidoctor.convert_file} instead. + alias render_file convert_file + module_function :render_file +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/chr.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/1.8.7/string/chr.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/chr.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/1.8.7/string/chr.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -# Educate Ruby 1.8.7 about the String#chr method. -class String - def chr - self[0..0] - end unless method_defined? 
:chr -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/limit.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/1.8.7/string/limit.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/limit.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/1.8.7/string/limit.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -if RUBY_ENGINE_JRUBY - class String - # Safely truncate the string to the specified number of bytes. - # If a multibyte char gets split, the dangling fragment is removed. - def limit size - return self unless size < bytesize - result = (unpack %(a#{size}))[0] - begin - result.unpack 'U*' - rescue ArgumentError - result.chop! - retry - end - result - end unless method_defined? :limit - end -else - class String - # Safely truncate the string to the specified number of bytes. - # If a multibyte char gets split, the dangling fragment is removed. - def limit size - return self unless size < bytesize - result = (unpack %(a#{size}))[0] - result.chop! until result.empty? || /.$/u =~ result - result - end unless method_defined? :limit - end -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/symbol/length.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/1.8.7/symbol/length.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/symbol/length.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/1.8.7/symbol/length.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -# Educate Ruby 1.8.7 about the Symbol#length method. -class Symbol - def length - to_s.length - end unless method_defined? :length -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/float/truncate.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/float/truncate.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/float/truncate.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/float/truncate.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,20 @@ +# frozen_string_literal: true +# NOTE remove once minimum required Ruby version is at least 2.4 +# NOTE use `send :prepend` to be nice to Ruby 2.0 +Float.send :prepend, (Module.new do + def truncate *args + if args.length == 1 + if (precision = Integer args.shift) == 0 + super + elsif precision > 0 + precision_factor = 10.0 ** precision + (self * precision_factor).to_i / precision_factor + else + precision_factor = 10 ** precision.abs + (self / precision_factor).to_i * precision_factor + end + else + super + end + end +end) if (Float.instance_method :truncate).arity == 0 diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/hash/merge.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/hash/merge.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/hash/merge.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/hash/merge.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,8 @@ +# frozen_string_literal: true +# NOTE remove once minimum required Ruby version is at least 2.6 +# NOTE use `send :prepend` to be nice to Ruby 2.0 +Hash.send :prepend, (Module.new do + def merge *args + (len = args.length) < 1 ? super({}) : (len > 1 ? 
args.inject(self) {|acc, arg| acc.merge arg } : (super args[0])) + end +end) if (Hash.instance_method :merge).arity == 1 diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/match_data/names.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/match_data/names.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/match_data/names.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/match_data/names.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,7 @@ +# frozen_string_literal: true +# NOTE remove once implemented in Opal; see https://github.com/opal/opal/issues/1964 +class MatchData + def names + [] + end +end unless MatchData.method_defined? :names diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/nil_or_empty.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/nil_or_empty.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/nil_or_empty.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/nil_or_empty.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,23 +1,24 @@ +# frozen_string_literal: true # A core library extension that defines the method nil_or_empty? as an alias to # optimize checks for nil? or empty? on common object types such as NilClass, # String, Array, Hash, and Numeric. class NilClass - alias :nil_or_empty? :nil? unless method_defined? :nil_or_empty? + alias nil_or_empty? nil? unless method_defined? :nil_or_empty? end class String - alias :nil_or_empty? :empty? unless method_defined? :nil_or_empty? + alias nil_or_empty? empty? unless method_defined? :nil_or_empty? end class Array - alias :nil_or_empty? :empty? unless method_defined? :nil_or_empty? + alias nil_or_empty? empty? unless method_defined? :nil_or_empty? end class Hash - alias :nil_or_empty? :empty? unless method_defined? :nil_or_empty? + alias nil_or_empty? empty? unless method_defined? :nil_or_empty? end class Numeric - alias :nil_or_empty? :nil? unless method_defined? :nil_or_empty? + alias nil_or_empty? nil? unless method_defined? :nil_or_empty? end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/regexp/is_match.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/regexp/is_match.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/regexp/is_match.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/regexp/is_match.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ +# frozen_string_literal: true +# NOTE remove once minimum required Ruby version is at least 2.4 +class Regexp + alias match? === +end unless Regexp.method_defined? :match? diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext/string/limit.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext/string/limit.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext/string/limit.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext/string/limit.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -class String - # Safely truncate the string to the specified number of bytes. - # If a multibyte char gets split, the dangling fragment is removed. - def limit size - return self unless size < bytesize - # NOTE JRuby 1.7 & Rubinius fail to detect invalid encoding unless encoding is forced; impact is marginal. - size -= 1 until ((result = byteslice 0, size).force_encoding ::Encoding::UTF_8).valid_encoding? - result - end unless method_defined? 
:limit -end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/core_ext.rb asciidoctor-2.0.10/lib/asciidoctor/core_ext.rb --- asciidoctor-1.5.5/lib/asciidoctor/core_ext.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/core_ext.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,8 +1,9 @@ -require 'asciidoctor/core_ext/nil_or_empty' -if RUBY_MIN_VERSION_1_9 - require 'asciidoctor/core_ext/string/limit' -elsif RUBY_ENGINE != 'opal' - require 'asciidoctor/core_ext/1.8.7/string/chr' - require 'asciidoctor/core_ext/1.8.7/string/limit' - require 'asciidoctor/core_ext/1.8.7/symbol/length' +# frozen_string_literal: true +require_relative 'core_ext/nil_or_empty' +require_relative 'core_ext/hash/merge' +if RUBY_ENGINE == 'opal' + require_relative 'core_ext/match_data/names' +else + require_relative 'core_ext/float/truncate' + require_relative 'core_ext/regexp/is_match' end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/document.rb asciidoctor-2.0.10/lib/asciidoctor/document.rb --- asciidoctor-1.5.5/lib/asciidoctor/document.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/document.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,24 +1,91 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor -# Public: Methods for parsing and converting AsciiDoc documents. +# Public: The Document class represents a parsed AsciiDoc document. # -# There are several strategies for getting the title of the document: +# Document is the root node of a parsed AsciiDoc document. It provides an +# abstract syntax tree (AST) that represents the structure of the AsciiDoc +# document from which the Document object was parsed. # -# doctitle - value of title attribute, if assigned and non-empty, -# otherwise title of first section in document, if present -# otherwise nil -# name - an alias of doctitle -# title - value of the title attribute, or nil if not present -# first_section.title - title of first section in document, if present -# header.title - title of section level 0 +# Although the constructor can be used to create an empty document object, more +# commonly, you'll load the document object from AsciiDoc source using the +# primary API methods, {Asciidoctor.load} or {Asciidoctor.load_file}. When +# using one of these APIs, you almost always want to set the safe mode to +# :safe (or :unsafe) to enable all of Asciidoctor's features. # -# Keep in mind that you'll want to honor these document settings: +# Asciidoctor.load '= Hello, AsciiDoc!', safe: :safe +# # => Asciidoctor::Document { doctype: "article", doctitle: "Hello, AsciiDoc!", blocks: 0 } # -# notitle - The h1 heading should not be shown -# noheader - The header block (h1 heading, author, revision info) should not be shown -# nofooter - the footer block should not be shown +# Instances of this class can be used to extract information from the document +# or alter its structure. As such, the Document object is most often used in +# extensions and by integrations. +# +# The most basic usage of the Document object is to retrieve the document's +# title. +# +# source = '= Document Title' +# document = Asciidoctor.load source, safe: :safe +# document.doctitle +# # => 'Document Title' +# +# If the document has no title, the {Document#doctitle} method returns the +# title of the first section. If that check falls through, you can have the +# method return a fallback value (the value of the untitled-label attribute). 
+# +# Asciidoctor.load('no doctitle', safe: :safe).doctitle use_fallback: true +# # => "Untitled" +# +# You can also use the Document object to access document attributes defined in +# the header, such as the author and doctype. +# +# source = '= Document Title +# Author Name +# :doctype: book' +# document = Asciidoctor.load source, safe: :safe +# document.author +# # => 'Author Name' +# document.doctype +# # => 'book' +# +# You can retrieve arbitrary document attributes defined in the header using +# {Document#attr} or check for the existence of one using {Document#attr?}: +# +# source = '= Asciidoctor +# :uri-project: https://asciidoctor.org' +# document = Asciidoctor.load source, safe: :safe +# document.attr 'uri-project' +# # => 'https://asciidoctor.org' +# document.attr? 'icons' +# # => false +# +# Starting at the Document object, you can begin walking the document tree using +# the {Document#blocks} method: +# +# source = 'paragraph contents +# +# [sidebar] +# sidebar contents' +# doc = Asciidoctor.load source, safe: :safe +# doc.blocks.map {|block| block.context } +# # => [:paragraph, :sidebar] +# +# You can discover block nodes at any depth in the tree using the +# {AbstractBlock#find_by} method. +# +# source = '**** +# paragraph in sidebar +# ****' +# doc = Asciidoctor.load source, safe: :safe +# doc.find_by(context: :paragraph).map {|block| block.context } +# # => [:paragraph] +# +# Loading a document object is the first step in the conversion process. You +# can take the process to completion by calling the {Document#convert} method. class Document < AbstractBlock + ImageReference = ::Struct.new :target, :imagesdir do + alias to_s target + end + Footnote = ::Struct.new :index, :id, :text class AttributeEntry @@ -32,20 +99,21 @@ def save_to block_attributes (block_attributes[:attribute_entries] ||= []) << self + self end end # Public Parsed and stores a partitioned title (i.e., title & subtitle). class Title attr_reader :main - alias :title :main + alias title main attr_reader :subtitle attr_reader :combined def initialize val, opts = {} # TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none) if (@sanitized = opts[:sanitize]) && val.include?('<') - val = val.gsub(XmlSanitizeRx, '').tr_s(' ', ' ').strip + val = val.gsub(XmlSanitizeRx, '').squeeze(' ').strip end if (sep = opts[:separator] || ':').empty? || !val.include?(sep = %(#{sep} )) @main = val @@ -61,7 +129,7 @@ end def subtitle? - !!@subtitle + @subtitle ? true : false end def to_s @@ -69,6 +137,9 @@ end end + # Public: The Author class represents information about an author extracted from document attributes + Author = ::Struct.new :name, :firstname, :middlename, :lastname, :initials, :email + # Public A read-only integer value indicating the level of security that # should be enforced while processing this document. The value must be # set in the Document constructor using the :safe option. @@ -78,25 +149,25 @@ # # A value of 1 (SAFE) closely parallels safe mode in AsciiDoc. In particular, # it prevents access to files which reside outside of the parent directory - # of the source file and disables any macro other than the include macro. + # of the source file and disables any macro other than the include directive. # # A value of 10 (SERVER) disallows the document from setting attributes that # would affect the conversion of the document, in addition to all the security - # features of SafeMode::SAFE. 
For instance, this value disallows changing the - # backend or the source-highlighter using an attribute defined in the source - # document. This is the most fundamental level of security for server-side + # features of SafeMode::SAFE. For instance, this level forbids changing the + # backend or source-highlighter using an attribute defined in the source + # document header. This is the most fundamental level of security for server # deployments (hence the name). # # A value of 20 (SECURE) disallows the document from attempting to read files # from the file system and including the contents of them into the document, # in addition to all the security features of SafeMode::SECURE. In - # particular, it disallows use of the include::[] macro and the embedding of + # particular, it disallows use of the include::[] directive and the embedding of # binary content (data uri), stylesheets and JavaScripts referenced by the # document. (Asciidoctor and trusted extensions may still be allowed to embed # trusted content into the document). # # Since Asciidoctor is aiming for wide adoption, 20 (SECURE) is the default - # value and is recommended for server-side deployments. + # value and is recommended for server deployments. # # A value of 100 (PARANOID) is planned to disallow the use of passthrough # macros and prevents the document from setting any known attributes in @@ -115,19 +186,25 @@ # attr_reader :compat_mode - # Public: Get the Boolean flag that indicates whether source map information is tracked by the parser - attr_reader :sourcemap + # Public: Get the cached value of the backend attribute for this document + attr_reader :backend - # Public: Get the Hash of document references - attr_reader :references + # Public: Get the cached value of the doctype attribute for this document + attr_reader :doctype + + # Public: Get or set the Boolean flag that indicates whether source map information should be tracked by the parser + attr_accessor :sourcemap + + # Public: Get the document catalog Hash + attr_reader :catalog + + # Public: Alias catalog property as references for backwards compatiblity + alias references catalog # Public: Get the Hash of document counters attr_reader :counters - # Public: Get the Hash of callouts - attr_reader :callouts - - # Public: Get the level-0 Section + # Public: Get the level-0 Section (i.e., doctitle). (Only stores the title, not the header attributes). attr_reader :header # Public: Get the String base directory for converting this document. @@ -148,17 +225,23 @@ # Public: Get the Reader associated with this document attr_reader :reader + # Public: Get/Set the PathResolver instance used to resolve paths in this Document. + attr_reader :path_resolver + # Public: Get the Converter associated with this document attr_reader :converter - # Public: Get the extensions registry + # Public: Get the SyntaxHighlighter associated with this document + attr_reader :syntax_highlighter + + # Public: Get the activated Extensions::Registry associated with this document. attr_reader :extensions # Public: Initialize a {Document} object. # # data - The AsciiDoc source data as a String or String Array. (default: nil) # options - A Hash of options to control processing (e.g., safe mode value (:safe), backend (:backend), - # header/footer toggle (:header_footer), custom attributes (:attributes)). (default: {}) + # standalone enclosure (:standalone), custom attributes (:attributes)). (default: {}) # # Duplication of the options Hash is handled in the enclosing API. 
# @@ -173,55 +256,62 @@ if (parent_doc = options.delete :parent) @parent_document = parent_doc options[:base_dir] ||= parent_doc.base_dir - @references = parent_doc.references.inject({}) do |accum, (key,ref)| - if key == :footnotes - accum[:footnotes] = [] - else - accum[key] = ref - end - accum - end - @callouts = parent_doc.callouts + options[:catalog_assets] = true if parent_doc.options[:catalog_assets] + @catalog = parent_doc.catalog.merge footnotes: [] # QUESTION should we support setting attribute in parent document from nested document? # NOTE we must dup or else all the assignments to the overrides clobbers the real attributes - attr_overrides = parent_doc.attributes.dup - ['doctype', 'compat-mode', 'toc', 'toc-placement', 'toc-position'].each do |key| - attr_overrides.delete key - end - @attribute_overrides = attr_overrides + @attribute_overrides = attr_overrides = parent_doc.attributes.merge + parent_doctype = attr_overrides.delete 'doctype' + attr_overrides.delete 'compat-mode' + attr_overrides.delete 'toc' + attr_overrides.delete 'toc-placement' + attr_overrides.delete 'toc-position' @safe = parent_doc.safe - @compat_mode = parent_doc.compat_mode + @attributes['compat-mode'] = '' if (@compat_mode = parent_doc.compat_mode) + @outfilesuffix = parent_doc.outfilesuffix @sourcemap = parent_doc.sourcemap + @timings = nil + @path_resolver = parent_doc.path_resolver @converter = parent_doc.converter - initialize_extensions = false + initialize_extensions = nil @extensions = parent_doc.extensions + @syntax_highlighter = parent_doc.syntax_highlighter else @parent_document = nil - @references = { - :ids => {}, - :footnotes => [], - :links => [], - :images => [], - :indexterms => [], - :includes => ::Set.new, + @catalog = { + ids: {}, # deprecated; kept for backwards compatibility with converters + refs: {}, + footnotes: [], + links: [], + images: [], + #indexterms: [], + callouts: Callouts.new, + includes: {}, } - @callouts = Callouts.new # copy attributes map and normalize keys # attribute overrides are attributes that can only be set from the commandline # a direct assignment effectively makes the attribute a constant # a nil value or name with leading or trailing ! will result in the attribute being unassigned - attr_overrides = {} - (options[:attributes] || {}).each do |key, value| - if key.start_with? '!' - key = key[1..-1] - value = nil + @attribute_overrides = attr_overrides = {} + (options[:attributes] || {}).each do |key, val| + if key.end_with? '@' + if key.start_with? '!' + key, val = (key.slice 1, key.length - 2), false + elsif key.end_with? '!@' + key, val = (key.slice 0, key.length - 2), false + else + key, val = key.chop, %(#{val}@) + end + elsif key.start_with? '!' + key, val = (key.slice 1, key.length), val == '@' ? false : nil elsif key.end_with? '!' - key = key.chop - value = nil + key, val = key.chop, val == '@' ? 
false : nil end - attr_overrides[key.downcase] = value + attr_overrides[key.downcase] = val + end + if (to_file = options[:to_file]) + attr_overrides['outfilesuffix'] = Helpers.extname to_file end - @attribute_overrides = attr_overrides # safely resolve the safe mode from const, int or string if !(safe_mode = options[:safe]) @safe = SafeMode::SECURE @@ -229,37 +319,40 @@ # be permissive in case API user wants to define new levels @safe = safe_mode else - # NOTE: not using infix rescue for performance reasons, see https://github.com/jruby/jruby/issues/1816 - begin - @safe = SafeMode.const_get(safe_mode.to_s.upcase) - rescue - @safe = SafeMode::SECURE - end + @safe = (SafeMode.value_for_name safe_mode) rescue SafeMode::SECURE end + input_mtime = options.delete :input_mtime @compat_mode = attr_overrides.key? 'compat-mode' @sourcemap = options[:sourcemap] - @converter = nil - initialize_extensions = defined? ::Asciidoctor::Extensions - @extensions = nil # initialize furthur down + @timings = options.delete :timings + @path_resolver = PathResolver.new + initialize_extensions = (defined? ::Asciidoctor::Extensions) ? true : nil + @extensions = nil # initialize furthur down if initialize_extensions is true + options[:standalone] = options[:header_footer] if (options.key? :header_footer) && !(options.key? :standalone) end - @parsed = false - @header = nil + @parsed = @reftexts = @header = @header_attributes = nil @counters = {} @attributes_modified = ::Set.new - @options = options @docinfo_processor_extensions = {} - header_footer = (options[:header_footer] ||= false) - options.freeze + standalone = options[:standalone] + (@options = options).freeze attrs = @attributes #attrs['encoding'] = 'UTF-8' attrs['sectids'] = '' - attrs['notitle'] = '' unless header_footer attrs['toc-placement'] = 'auto' + if standalone + attrs['copycss'] = '' + # sync embedded attribute with :standalone option value + attr_overrides['embedded'] = nil + else + attrs['notitle'] = '' + # sync embedded attribute with :standalone option value + attr_overrides['embedded'] = '' + end attrs['stylesheet'] = '' attrs['webfonts'] = '' - attrs['copycss'] = '' if header_footer attrs['prewrap'] = '' attrs['attribute-undefined'] = Compliance.attribute_undefined attrs['attribute-missing'] = Compliance.attribute_missing @@ -272,29 +365,27 @@ attrs['note-caption'] = 'Note' attrs['tip-caption'] = 'Tip' attrs['warning-caption'] = 'Warning' - attrs['appendix-caption'] = 'Appendix' attrs['example-caption'] = 'Example' attrs['figure-caption'] = 'Figure' #attrs['listing-caption'] = 'Listing' attrs['table-caption'] = 'Table' attrs['toc-title'] = 'Table of Contents' #attrs['preface-title'] = 'Preface' - attrs['manname-title'] = 'NAME' + attrs['section-refsig'] = 'Section' + attrs['part-refsig'] = 'Part' + attrs['chapter-refsig'] = 'Chapter' + attrs['appendix-caption'] = attrs['appendix-refsig'] = 'Appendix' attrs['untitled-label'] = 'Untitled' attrs['version-label'] = 'Version' attrs['last-update-label'] = 'Last updated' attr_overrides['asciidoctor'] = '' - attr_overrides['asciidoctor-version'] = VERSION + attr_overrides['asciidoctor-version'] = ::Asciidoctor::VERSION - safe_mode_name = SafeMode.constants.find {|l| SafeMode.const_get(l) == @safe }.to_s.downcase - attr_overrides['safe-mode-name'] = safe_mode_name + attr_overrides['safe-mode-name'] = (safe_mode_name = SafeMode.name_for_value @safe) attr_overrides["safe-mode-#{safe_mode_name}"] = '' attr_overrides['safe-mode-level'] = @safe - # sync the embedded attribute w/ the value of options...do 
not allow override - attr_overrides['embedded'] = header_footer ? nil : '' - # the only way to set the max-include-depth attribute is via the API; default to 64 like AsciiDoc Python attr_overrides['max-include-depth'] ||= 64 @@ -303,21 +394,20 @@ attr_overrides['user-home'] = USER_HOME - # legacy support for numbered attribute + # remap legacy attribute names attr_overrides['sectnums'] = attr_overrides.delete 'numbered' if attr_overrides.key? 'numbered' + attr_overrides['hardbreaks-option'] = attr_overrides.delete 'hardbreaks' if attr_overrides.key? 'hardbreaks' - # if the base_dir option is specified, it overrides docdir as the root for relative paths - # otherwise, the base_dir is the directory of the source file (docdir) or the current - # directory of the input is a string - if options[:base_dir] - @base_dir = attr_overrides['docdir'] = ::File.expand_path(options[:base_dir]) + # If the base_dir option is specified, it overrides docdir and is used as the root for relative + # paths. Otherwise, the base_dir is the directory of the source file (docdir), if set, otherwise + # the current directory. + if (base_dir_val = options[:base_dir]) + @base_dir = (attr_overrides['docdir'] = ::File.expand_path base_dir_val) + elsif attr_overrides['docdir'] + @base_dir = attr_overrides['docdir'] else - if attr_overrides['docdir'] - @base_dir = attr_overrides['docdir'] = ::File.expand_path(attr_overrides['docdir']) - else - #warn 'asciidoctor: WARNING: setting base_dir is recommended when working with string documents' unless nested? - @base_dir = attr_overrides['docdir'] = ::File.expand_path(::Dir.pwd) - end + #logger.warn 'setting base_dir is recommended when working with string documents' unless nested? + @base_dir = attr_overrides['docdir'] = ::Dir.pwd end # allow common attributes backend and doctype to be set using options hash, coerce values to string @@ -343,42 +433,42 @@ if @safe >= SafeMode::SECURE attr_overrides['max-attribute-value-size'] = 4096 unless attr_overrides.key? 'max-attribute-value-size' # assign linkcss (preventing css embedding) unless explicitly disabled from the commandline or API - # effectively the same has "has key 'linkcss' and value == nil" - unless attr_overrides.fetch('linkcss', '').nil? - attr_overrides['linkcss'] = '' - end + #attr_overrides['linkcss'] = (attr_overrides.fetch 'linkcss', '') || nil + attr_overrides['linkcss'] = '' unless attr_overrides.key? 'linkcss' # restrict document from enabling icons attr_overrides['icons'] ||= nil end end # the only way to set the max-attribute-value-size attribute is via the API; disabled by default - @max_attribute_value_size = (val = (attr_overrides['max-attribute-value-size'] ||= nil)) ? val.to_i.abs : nil + @max_attribute_value_size = (size = (attr_overrides['max-attribute-value-size'] ||= nil)) ? size.to_i.abs : nil attr_overrides.delete_if do |key, val| - verdict = false - # a nil value undefines the attribute - if val.nil? - attrs.delete(key) - else - # a value ending in @ indicates this attribute does not override - # an attribute with the same key in the document souce + if val + # a value ending in @ allows document to override value if ::String === val && (val.end_with? 
'@') - val = val.chop - verdict = true + val, verdict = val.chop, true end attrs[key] = val + else + # a nil or false value both unset the attribute; only a nil value locks it + attrs.delete key + verdict = val == false end verdict end if parent_doc - # setup default doctype (backend is fixed) - attrs['doctype'] ||= DEFAULT_DOCTYPE + @backend = attrs['backend'] + # reset doctype unless it matches the default value + unless (@doctype = attrs['doctype'] = parent_doctype) == DEFAULT_DOCTYPE + update_doctype_attributes DEFAULT_DOCTYPE + end # don't need to do the extra processing within our own document # FIXME line info isn't reported correctly within include files in nested document @reader = Reader.new data, options[:cursor] + @source_location = @reader.cursor if @sourcemap # Now parse the lines in the reader into blocks # Eagerly parse (for now) since a subdocument is not a publicly accessible object @@ -389,57 +479,41 @@ @parsed = true else # setup default backend and doctype - if (attrs['backend'] ||= DEFAULT_BACKEND) == 'manpage' - attrs['doctype'] = attr_overrides['doctype'] = 'manpage' + @backend = nil + if (initial_backend = attrs['backend'] || DEFAULT_BACKEND) == 'manpage' + @doctype = attrs['doctype'] = attr_overrides['doctype'] = 'manpage' else - attrs['doctype'] ||= DEFAULT_DOCTYPE + @doctype = (attrs['doctype'] ||= DEFAULT_DOCTYPE) end - update_backend_attributes attrs['backend'], true - - #attrs['indir'] = attrs['docdir'] - #attrs['infile'] = attrs['docfile'] + update_backend_attributes initial_backend, true # dynamic intrinstic attribute values - # See https://reproducible-builds.org/specs/source-date-epoch/ - now = ::ENV['SOURCE_DATE_EPOCH'] ? (::Time.at ::ENV['SOURCE_DATE_EPOCH'].to_i).utc : ::Time.now - localdate = (attrs['localdate'] ||= now.strftime('%Y-%m-%d')) - unless (localtime = attrs['localtime']) - begin - localtime = attrs['localtime'] = now.strftime('%H:%M:%S %Z') - rescue # Asciidoctor.js fails if timezone string has characters outside basic Latin (see asciidoctor.js#23) - localtime = attrs['localtime'] = now.strftime('%H:%M:%S %z') - end - end - attrs['localdatetime'] ||= %(#{localdate} #{localtime}) - - # docdate, doctime and docdatetime should default to - # localdate, localtime and localdatetime if not otherwise set - attrs['docdate'] ||= localdate - attrs['doctime'] ||= localtime - attrs['docdatetime'] ||= %(#{localdate} #{localtime}) + #attrs['indir'] = attrs['docdir'] + #attrs['infile'] = attrs['docfile'] # fallback directories attrs['stylesdir'] ||= '.' - attrs['iconsdir'] ||= ::File.join(attrs.fetch('imagesdir', './images'), 'icons') + attrs['iconsdir'] ||= %(#{attrs.fetch 'imagesdir', './images'}/icons) + + fill_datetime_attributes attrs, input_mtime if initialize_extensions - if (registry = options[:extensions_registry]) - if Extensions::Registry === registry || (::RUBY_ENGINE_JRUBY && - ::AsciidoctorJ::Extensions::ExtensionRegistry === registry) - # take it as it is - else - registry = Extensions::Registry.new + if (ext_registry = options[:extension_registry]) + # QUESTION should we warn if the value type of this option is not a registry + if Extensions::Registry === ext_registry || ((defined? 
::AsciidoctorJ::Extensions::ExtensionRegistry) && + ::AsciidoctorJ::Extensions::ExtensionRegistry === ext_registry) + @extensions = ext_registry.activate self end elsif ::Proc === (ext_block = options[:extensions]) - registry = Extensions.build_registry(&ext_block) - else - registry = Extensions::Registry.new + @extensions = Extensions.create(&ext_block).activate self + elsif !Extensions.groups.empty? + @extensions = Extensions::Registry.new.activate self end - @extensions = registry.activate self end - @reader = PreprocessorReader.new self, data, Reader::Cursor.new(attrs['docfile'], @base_dir) + @reader = PreprocessorReader.new self, data, (Reader::Cursor.new attrs['docfile'], @base_dir), normalize: true + @source_location = @reader.cursor if @sourcemap end end @@ -460,7 +534,10 @@ else doc = self # create reader if data is provided (used when data is not known at the time the Document object is created) - @reader = PreprocessorReader.new doc, data, Reader::Cursor.new(@attributes['docfile'], @base_dir) if data + if data + @reader = PreprocessorReader.new doc, data, (Reader::Cursor.new @attributes['docfile'], @base_dir), normalize: true + @source_location = @reader.cursor if @sourcemap + end if (exts = @parent_document ? nil : @extensions) && exts.preprocessors? exts.preprocessors.each do |ext| @@ -469,13 +546,13 @@ end # Now parse the lines in the reader into blocks - Parser.parse @reader, doc, :header_only => !!@options[:parse_header_only] + Parser.parse @reader, doc, header_only: @options[:parse_header_only] # should we call sort of post-parse function? restore_attributes - if exts && exts.treeprocessors? - exts.treeprocessors.each do |ext| + if exts && exts.tree_processors? + exts.tree_processors.each do |ext| if (result = ext.process_method[doc]) && Document === result && result != doc doc = result end @@ -487,25 +564,26 @@ end end + # Public: Returns whether the source lines of the document have been parsed. + def parsed? + @parsed + end + # Public: Get the named counter and take the next number in the sequence. # # name - the String name of the counter # seed - the initial value as a String or Integer # # returns the next number in the sequence for the specified counter - def counter(name, seed = nil) - if (attr_is_seed = !(attr_val = @attributes[name]).nil_or_empty?) && @counters.key?(name) - @counters[name] = nextval(attr_val) + def counter name, seed = nil + return @parent_document.counter name, seed if @parent_document + if (attr_seed = !(attr_val = @attributes[name]).nil_or_empty?) && (@counters.key? name) + @attributes[name] = @counters[name] = Helpers.nextval attr_val + elsif seed + @attributes[name] = @counters[name] = seed == seed.to_i.to_s ? seed.to_i : seed else - if seed.nil? - seed = nextval(attr_is_seed ? attr_val : 0) - elsif seed.to_i.to_s == seed - seed = seed.to_i - end - @counters[name] = seed + @attributes[name] = @counters[name] = Helpers.nextval attr_seed ? 
attr_val : 0 end - - (@attributes[name] = @counters[name]) end # Public: Increment the specified counter and store it in the block's attributes @@ -514,70 +592,70 @@ # block - the Block on which to save the counter # # returns the next number in the sequence for the specified counter - def counter_increment(counter_name, block) - val = counter(counter_name) - AttributeEntry.new(counter_name, val).save_to(block.attributes) - val + def increment_and_store_counter counter_name, block + ((AttributeEntry.new counter_name, (counter counter_name)).save_to block.attributes).value end + # Deprecated: Map old counter_increment method to increment_counter for backwards compatibility + alias counter_increment increment_and_store_counter - # Internal: Get the next value in the sequence. - # - # Handles both integer and character sequences. - # - # current - the value to increment as a String or Integer - # - # returns the next value in the sequence according to the current value's type - def nextval(current) - if ::Integer === current - current + 1 + # Public: Register a reference in the document catalog + def register type, value + case type + when :ids # deprecated + register :refs, [(id = value[0]), (Inline.new self, :anchor, value[1], type: :ref, id: id)] + when :refs + @catalog[:refs][value[0]] ||= (ref = value[1]) + ref + when :footnotes + @catalog[type] << value else - intval = current.to_i - if intval.to_s != current.to_s - (current[0].ord + 1).chr - else - intval + 1 - end + @catalog[type] << (type == :images ? (ImageReference.new value, @attributes['imagesdir']) : value) if @options[:catalog_assets] end end - def register(type, value, force = false) - case type - when :ids - id, reftext = [*value] - reftext ||= '[' + id + ']' - if force - @references[:ids][id] = reftext - else - @references[:ids][id] ||= reftext - end - when :footnotes, :indexterms - @references[type] << value + # Public: Scan registered references and return the ID of the first reference that matches the specified reference text. + # + # text - The String reference text to compare to the converted reference text of each registered reference. + # + # Returns the String ID of the first reference with matching reference text or nothing if no reference is found. + def resolve_id text + if @reftexts + @reftexts[text] + elsif @parsed + # @reftexts is set eagerly to prevent nested lazy init + (@reftexts = {}).tap {|accum| @catalog[:refs].each {|id, ref| accum[ref.xreftext] ||= id } }[text] else - if @options[:catalog_assets] - @references[type] << value - end + # @reftexts is set eagerly to prevent nested lazy init + resolved_id = nil + # NOTE short-circuit early since we're throwing away this table + (@reftexts = {}).tap {|accum| @catalog[:refs].each {|id, ref| (xreftext = ref.xreftext) == text ? (break (resolved_id = id)) : (accum[xreftext] ||= id) } } + @reftexts = nil + resolved_id end end def footnotes? - !@references[:footnotes].empty? + @catalog[:footnotes].empty? ? false : true end def footnotes - @references[:footnotes] + @catalog[:footnotes] + end + + def callouts + @catalog[:callouts] end def nested? - !!@parent_document + @parent_document ? true : false end def embedded? - # QUESTION should this be !@options[:header_footer] ? @attributes.key? 'embedded' end def extensions? - !!@extensions + @extensions ? true : false end # Make the raw source for the Document available. 
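A small usage sketch of the counter and footnote-catalog accessors changed in the hunk above, assuming the asciidoctor 2.0 gem is installed (the counter name and footnote text are arbitrary examples):

    require 'asciidoctor'

    doc = Asciidoctor.load 'Text with a footnote.footnote:[Like this.]', safe: :safe
    doc.convert                     # footnotes are registered in the catalog during conversion

    doc.footnotes?                  # => true
    doc.footnotes.first.text        # => 'Like this.'

    # Document#counter returns the next value in a named sequence and also
    # stores it as a document attribute of the same name.
    doc.counter 'example-counter'   # => 1
    doc.counter 'example-counter'   # => 2
    doc.attr 'example-counter'      # => 2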
@@ -590,26 +668,30 @@ @reader.source_lines if @reader end - def doctype - @doctype ||= @attributes['doctype'] - end - - def backend - @backend ||= @attributes['backend'] - end - def basebackend? base @attributes['basebackend'] == base end - # The title explicitly defined in the document attributes + # Public: Return the doctitle as a String + # + # Returns the resolved doctitle as a [String] or nil if a doctitle cannot be resolved def title - @attributes['title'] + doctitle end - def title=(title) - @header ||= Section.new(self, 0) - @header.title = title + # Public: Set the title on the document header + # + # Set the title of the document header to the specified value. If the header + # does not exist, it is first created. + # + # title - the String title to assign as the title of the document header + # + # Returns the new [String] title assigned to the document header + def title= title + unless (sect = @header) + (sect = (@header = Section.new self, 0)).sectname = 'header' + end + sect.title = title end # Public: Resolves the primary title for the document @@ -632,25 +714,27 @@ # Returns the resolved title as a [Title] if the :partition option is passed or a [String] if not # or nil if no value can be resolved. def doctitle opts = {} - if !(val = @attributes['title'].nil_or_empty?) - val = title - elsif (sect = first_section) && sect.title? - val = sect.title - elsif opts[:use_fallback] && (val = @attributes['untitled-label']) - # use val set in condition - else - return + unless (val = @attributes['title']) + if (sect = first_section) + val = sect.title + elsif !(opts[:use_fallback] && (val = @attributes['untitled-label'])) + return + end end if (separator = opts[:partition]) - Title.new val, opts.merge({ :separator => (separator == true ? @attributes['title-separator'] : separator) }) + Title.new val, opts.merge({ separator: (separator == true ? @attributes['title-separator'] : separator) }) elsif opts[:sanitize] && val.include?('<') - val.gsub(XmlSanitizeRx, '').tr_s(' ', ' ').strip + val.gsub(XmlSanitizeRx, '').squeeze(' ').strip else val end end - alias :name :doctitle + alias name doctitle + + def xreftext xrefstyle = nil + (val = reftext) && !val.empty? ? val : title + end # Public: Convenience method to retrieve the document attribute 'author' # @@ -659,6 +743,27 @@ @attributes['author'] end + # Public: Convenience method to retrieve the authors of this document as an Array of Author objects. + # + # This method is backed by the author-related attributes on the document. + # + # returns the authors of this document as an Array + def authors + if (attrs = @attributes).key? 'author' + authors = [(Author.new attrs['author'], attrs['firstname'], attrs['middlename'], attrs['lastname'], attrs['authorinitials'], attrs['email'])] + if (num_authors = attrs['authorcount'] || 0) > 1 + idx = 1 + while idx < num_authors + idx += 1 + authors << (Author.new attrs[%(author_#{idx})], attrs[%(firstname_#{idx})], attrs[%(middlename_#{idx})], attrs[%(lastname_#{idx})], attrs[%(authorinitials_#{idx})], attrs[%(email_#{idx})]) + end + end + authors + else + [] + end + end + # Public: Convenience method to retrieve the document attribute 'revdate' # # returns the date of last revision for the document as a String @@ -678,15 +783,14 @@ @attributes.key? 'nofooter' end - # QUESTION move to AbstractBlock? def first_section - has_header? ? @header : (@blocks || []).find {|e| e.context == :section } + @header || @blocks.find {|e| e.context == :section } end - def has_header? + def header? @header ? 
true : false end - alias :header? :has_header? + alias has_header? header? # Public: Append a content Block to this Document. # @@ -696,14 +800,14 @@ # # Returns The parent Block def << block - assign_index block if block.context == :section + assign_numeral block if block.context == :section super end - # Internal: called after the header has been parsed and before the content - # will be parsed. + # Internal: Called by the parser after parsing the header and before parsing + # the body, even if no header is found. #-- - # QUESTION should we invoke the Treeprocessors here, passing in a phase? + # QUESTION should we invoke the TreeProcessors here, passing in a phase? # QUESTION is finalize_header the right name? def finalize_header unrooted_attributes, header_valid = true clear_playback_attributes unrooted_attributes @@ -712,100 +816,7 @@ unrooted_attributes end - # Internal: Branch the attributes so that the original state can be restored - # at a future time. - def save_attributes - # enable toc and sectnums (i.e., numbered) by default in DocBook backend - # NOTE the attributes_modified should go away once we have a proper attribute storage & tracking facility - if (attrs = @attributes)['basebackend'] == 'docbook' - attrs['toc'] = '' unless attribute_locked?('toc') || @attributes_modified.include?('toc') - attrs['sectnums'] = '' unless attribute_locked?('sectnums') || @attributes_modified.include?('sectnums') - end - - unless attrs.key?('doctitle') || !(val = doctitle) - attrs['doctitle'] = val - end - - # css-signature cannot be updated after header attributes are processed - @id = attrs['css-signature'] unless @id - - toc_position_val = if (toc_val = (attrs.delete('toc2') ? 'left' : attrs['toc'])) - # toc-placement allows us to separate position from using fitted slot vs macro - (toc_placement = attrs.fetch('toc-placement', 'macro')) && toc_placement != 'auto' ? toc_placement : attrs['toc-position'] - else - nil - end - - if toc_val && (!toc_val.empty? || !toc_position_val.nil_or_empty?) - default_toc_position = 'left' - # TODO rename toc2 to aside-toc - default_toc_class = 'toc2' - if !toc_position_val.nil_or_empty? - position = toc_position_val - elsif !toc_val.empty? - position = toc_val - else - position = default_toc_position - end - attrs['toc'] = '' - attrs['toc-placement'] = 'auto' - case position - when 'left', '<', '<' - attrs['toc-position'] = 'left' - when 'right', '>', '>' - attrs['toc-position'] = 'right' - when 'top', '^' - attrs['toc-position'] = 'top' - when 'bottom', 'v' - attrs['toc-position'] = 'bottom' - when 'preamble', 'macro' - attrs['toc-position'] = 'content' - attrs['toc-placement'] = position - default_toc_class = nil - else - attrs.delete 'toc-position' - default_toc_class = nil - end - attrs['toc-class'] ||= default_toc_class if default_toc_class - end - - if attrs.key? 'compat-mode' - attrs['source-language'] = attrs['language'] if attrs.has_key? 
'language' - @compat_mode = true - else - @compat_mode = false - end - - # NOTE pin the outfilesuffix after the header is parsed - @outfilesuffix = attrs['outfilesuffix'] - - @header_attributes = attrs.dup - - # unfreeze "flexible" attributes - unless @parent_document - FLEXIBLE_ATTRIBUTES.each do |name| - # turning a flexible attribute off should be permanent - # (we may need more config if that's not always the case) - if @attribute_overrides.key?(name) && @attribute_overrides[name] - @attribute_overrides.delete(name) - end - end - end - end - - # Internal: Restore the attributes to the previously saved state (attributes in header) - def restore_attributes - @callouts.rewind unless @parent_document - # QUESTION shouldn't this be a dup in case we convert again? - @attributes = @header_attributes - end - - # Internal: Delete any attributes stored for playback - def clear_playback_attributes(attributes) - attributes.delete(:attribute_entries) - end - - # Internal: Replay attribute assignments at the block level + # Public: Replay attribute assignments at the block level def playback_attributes(block_attributes) if block_attributes.key? :attribute_entries block_attributes[:attribute_entries].each do |entry| @@ -821,36 +832,41 @@ end end + # Public: Restore the attributes to the previously saved state (attributes in header) + def restore_attributes + @catalog[:callouts].rewind unless @parent_document + @attributes.replace @header_attributes + end + # Public: Set the specified attribute on the document if the name is not locked # # If the attribute is locked, false is returned. Otherwise, the value is # assigned to the attribute name after first performing attribute - # substitutions on the value. If the attribute name is 'backend', then the - # value of backend-related attributes are updated. + # substitutions on the value. If the attribute name is 'backend' or + # 'doctype', then the value of backend-related attributes are updated. # # name - the String attribute name - # value - the String attribute value + # value - the String attribute value; must not be nil (optional, default: '') # - # returns true if the attribute was set, false if it was not set because it's locked - def set_attribute(name, value) - if attribute_locked?(name) - false - else - if @max_attribute_value_size - resolved_value = (apply_attribute_value_subs value).limit @max_attribute_value_size - else - resolved_value = apply_attribute_value_subs value - end - case name - when 'backend' - update_backend_attributes resolved_value, !!@attributes_modified.delete?('htmlsyntax') - when 'doctype' - update_doctype_attributes resolved_value + # Returns the substituted value if the attribute was set or nil if it was not because it's locked. + def set_attribute name, value = '' + unless attribute_locked? name + value = apply_attribute_value_subs value unless value.empty? + # NOTE if @header_attributes is set, we're beyond the document header + if @header_attributes + @attributes[name] = value else - @attributes[name] = resolved_value + case name + when 'backend' + update_backend_attributes value, (@attributes_modified.delete? 
'htmlsyntax') && value == @backend + when 'doctype' + update_doctype_attributes value + else + @attributes[name] = value + end + @attributes_modified << name end - @attributes_modified << name - true + value end end @@ -880,152 +896,33 @@ @attribute_overrides.key?(name) end - # Internal: Apply substitutions to the attribute value - # - # If the value is an inline passthrough macro (e.g., pass:[value]), - # apply the substitutions defined in to the value, or leave the value - # unmodified if no substitutions are specified. If the value is not an - # inline passthrough macro, apply header substitutions to the value. - # - # value - The String attribute value on which to perform substitutions + # Public: Assign a value to the specified attribute in the document header. # - # Returns The String value with substitutions performed - def apply_attribute_value_subs(value) - if (m = AttributeEntryPassMacroRx.match(value)) - if !m[1].empty? - subs = resolve_pass_subs m[1] - subs.empty? ? m[2] : (apply_subs m[2], subs) - else - m[2] - end - else - apply_header_subs value - end - end - - # Public: Update the backend attributes to reflect a change in the selected backend + # The assignment will be visible when the header attributes are restored, + # typically between processor phases (e.g., between parse and convert). # - # This method also handles updating the related doctype attributes if the - # doctype attribute is assigned at the time this method is called. - def update_backend_attributes new_backend, force = false - if force || (new_backend && new_backend != @attributes['backend']) - attrs = @attributes - current_backend = attrs['backend'] - current_basebackend = attrs['basebackend'] - current_doctype = attrs['doctype'] - if new_backend.start_with? 'xhtml' - attrs['htmlsyntax'] = 'xml' - new_backend = new_backend[1..-1] - elsif new_backend.start_with? 'html' - attrs['htmlsyntax'] = 'html' unless attrs['htmlsyntax'] == 'xml' - end - if (resolved_name = BACKEND_ALIASES[new_backend]) - new_backend = resolved_name - end - if current_backend - attrs.delete %(backend-#{current_backend}) - if current_doctype - attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) - end - end - if current_doctype - attrs[%(doctype-#{current_doctype})] = '' - attrs[%(backend-#{new_backend}-doctype-#{current_doctype})] = '' - end - attrs['backend'] = new_backend - attrs[%(backend-#{new_backend})] = '' - # (re)initialize converter - if Converter::BackendInfo === (@converter = create_converter) - new_basebackend = @converter.basebackend - attrs['outfilesuffix'] = @converter.outfilesuffix unless attribute_locked? 'outfilesuffix' - new_filetype = @converter.filetype - else - new_basebackend = new_backend.sub TrailingDigitsRx, '' - # QUESTION should we be forcing the basebackend to html if unknown? - new_outfilesuffix = DEFAULT_EXTENSIONS[new_basebackend] || '.html' - new_filetype = new_outfilesuffix[1..-1] - attrs['outfilesuffix'] = new_outfilesuffix unless attribute_locked? 
'outfilesuffix' - end - if (current_filetype = attrs['filetype']) - attrs.delete %(filetype-#{current_filetype}) - end - attrs['filetype'] = new_filetype - attrs[%(filetype-#{new_filetype})] = '' - if (page_width = DEFAULT_PAGE_WIDTHS[new_basebackend]) - attrs['pagewidth'] = page_width - else - attrs.delete 'pagewidth' - end - if new_basebackend != current_basebackend - if current_basebackend - attrs.delete %(basebackend-#{current_basebackend}) - if current_doctype - attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) - end - end - attrs['basebackend'] = new_basebackend - attrs[%(basebackend-#{new_basebackend})] = '' - attrs[%(basebackend-#{new_basebackend}-doctype-#{current_doctype})] = '' if current_doctype - end - # clear cached backend value - @backend = nil - end - end - - def update_doctype_attributes new_doctype - if new_doctype && new_doctype != @attributes['doctype'] - attrs = @attributes - current_doctype = attrs['doctype'] - current_backend = attrs['backend'] - current_basebackend = attrs['basebackend'] - if current_doctype - attrs.delete %(doctype-#{current_doctype}) - attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) if current_backend - attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) if current_basebackend - end - attrs['doctype'] = new_doctype - attrs[%(doctype-#{new_doctype})] = '' - attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' if current_backend - attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' if current_basebackend - # clear cached doctype value - @doctype = nil - end - end - - # TODO document me - def create_converter - converter_opts = {} - converter_opts[:htmlsyntax] = @attributes['htmlsyntax'] - template_dirs = if (template_dir = @options[:template_dir]) - converter_opts[:template_dirs] = [template_dir] - elsif (template_dirs = @options[:template_dirs]) - converter_opts[:template_dirs] = template_dirs - end - if template_dirs - converter_opts[:template_cache] = @options.fetch :template_cache, true - converter_opts[:template_engine] = @options[:template_engine] - converter_opts[:template_engine_options] = @options[:template_engine_options] - converter_opts[:eruby] = @options[:eruby] - converter_opts[:safe] = @safe - end - if (converter = @options[:converter]) - converter_factory = Converter::Factory.new ::Hash[backend, converter] + # name - The String attribute name to assign + # value - The Object value to assign to the attribute (default: '') + # overwrite - A Boolean indicating whether to assign the attribute + # if already present in the attributes Hash (default: true) + # + # Returns a [Boolean] indicating whether the assignment was performed + def set_header_attribute name, value = '', overwrite = true + attrs = @header_attributes || @attributes + if overwrite == false && (attrs.key? name) + false else - converter_factory = Converter::Factory.default false + attrs[name] = value + true end - # QUESTION should we honor the convert_opts? - # QUESTION should we pass through all options and attributes too? - #converter_opts.update opts - converter_factory.create backend, converter_opts end # Public: Convert the AsciiDoc document using the templates # loaded by the Converter. If a :template_dir is not specified, # or a template is missing, the converter will fall back to # using the appropriate built-in template. - #-- - # QUESTION should we dup @header_attributes before converting? 
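To show how the convert and write methods below are typically driven end to end, a hedged sketch using the public API (the file names are hypothetical, and the :standalone option reflects the renaming noted earlier in this patch):

    require 'asciidoctor'

    doc = Asciidoctor.load_file 'sample.adoc', safe: :safe, standalone: true

    html = doc.convert               # selects the 'document' transform because :standalone is true
    doc.write html, 'sample.html'    # delegates to the converter when it acts as a Writer, otherwise writes the file

    # The one-shot convenience API covers the same load/convert/write cycle:
    Asciidoctor.convert_file 'sample.adoc', safe: :safe, standalone: true, to_file: 'sample.html'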
def convert opts = {} + @timings.start :convert if @timings parse unless @parsed unless @safe >= SafeMode::SERVER || opts.empty? # QUESTION should we store these on the Document object? @@ -1033,20 +930,24 @@ @attributes.delete 'outdir' unless (@attributes['outdir'] = opts['outdir']) end - # QUESTION should we add processors that execute before conversion begins? - unless @converter - fail %(asciidoctor: FAILED: missing converter for backend '#{backend}'. Processing aborted.) - end + # QUESTION should we add extensions that execute before conversion begins? if doctype == 'inline' - # QUESTION should we warn if @blocks.size > 0 and the first block is not a paragraph? - if (block = @blocks[0]) && block.content_model != :compound - output = block.content - else - output = nil + if (block = @blocks[0] || @header) + if block.content_model == :compound || block.content_model == :empty + logger.warn 'no inline candidate; use the inline doctype to convert a single paragragh, verbatim, or raw block' + else + output = block.content + end end else - transform = ((opts.key? :header_footer) ? opts[:header_footer] : @options[:header_footer]) ? 'document' : 'embedded' + if opts.key? :standalone + transform = opts[:standalone] ? 'document' : 'embedded' + elsif opts.key? :header_footer + transform = opts[:header_footer] ? 'document' : 'embedded' + else + transform = @options[:standalone] ? 'document' : 'embedded' + end output = @converter.convert self, transform end @@ -1058,31 +959,40 @@ end end + @timings.record :convert if @timings output end - # Alias render to convert to maintain backwards compatibility - alias :render :convert + # Deprecated: Use {Document#convert} instead. + alias render convert # Public: Write the output to the specified file # # If the converter responds to :write, delegate the work of writing the file # to that method. Otherwise, write the output the specified file. + # + # Returns nothing def write output, target + @timings.start :write if @timings if Writer === @converter @converter.write output, target else if target.respond_to? :write + # QUESTION should we set encoding using target.set_encoding? unless output.nil_or_empty? target.write output.chomp # ensure there's a trailing endline - target.write EOL + target.write LF end else - ::File.open(target, 'w') {|f| f.write output } + ::File.write target, output, mode: FILE_WRITE_MODE + end + if @backend == 'manpage' && ::String === target && (@converter.class.respond_to? :write_alternate_pages) + @converter.class.write_alternate_pages @attributes['mannames'], @attributes['manvolnum'], target end - nil end + @timings.record :write if @timings + nil end =begin @@ -1123,14 +1033,9 @@ # returns The contents of the docinfo file(s) or empty string if no files are # found or the safe mode is secure or greater. def docinfo location = :head, suffix = nil - if safe >= SafeMode::SECURE - '' - else - qualifier = location == :head ? nil : %(-#{location}) + if safe < SafeMode::SECURE + qualifier = %(-#{location}) unless location == :head suffix = @outfilesuffix unless suffix - docinfodir = @attributes['docinfodir'] - - content = nil if (docinfo = @attributes['docinfo']).nil_or_empty? if @attributes.key? 'docinfo2' @@ -1141,50 +1046,37 @@ docinfo = docinfo ? 
['private'] : nil end else - docinfo = docinfo.split(',').map(&:strip) + docinfo = docinfo.split(',').map {|it| it.strip } end if docinfo - docinfo_filename = %(docinfo#{qualifier}#{suffix}) + content = [] + docinfo_file, docinfo_dir, docinfo_subs = %(docinfo#{qualifier}#{suffix}), @attributes['docinfodir'], resolve_docinfo_subs unless (docinfo & ['shared', %(shared-#{location})]).empty? - docinfo_path = normalize_system_path(docinfo_filename, docinfodir) + docinfo_path = normalize_system_path docinfo_file, docinfo_dir # NOTE normalizing the lines is essential if we're performing substitutions - if (content = read_asset(docinfo_path, :normalize => true)) - if (docinfosubs ||= resolve_docinfo_subs) - content = (docinfosubs == :attributes) ? sub_attributes(content) : apply_subs(content, docinfosubs) - end + if (shared_docinfo = read_asset docinfo_path, normalize: true) + content << (apply_subs shared_docinfo, docinfo_subs) end end unless @attributes['docname'].nil_or_empty? || (docinfo & ['private', %(private-#{location})]).empty? - docinfo_path = normalize_system_path(%(#{@attributes['docname']}-#{docinfo_filename}), docinfodir) + docinfo_path = normalize_system_path %(#{@attributes['docname']}-#{docinfo_file}), docinfo_dir # NOTE normalizing the lines is essential if we're performing substitutions - if (content2 = read_asset(docinfo_path, :normalize => true)) - if (docinfosubs ||= resolve_docinfo_subs) - content2 = (docinfosubs == :attributes) ? sub_attributes(content2) : apply_subs(content2, docinfosubs) - end - content = content ? %(#{content}#{EOL}#{content2}) : content2 + if (private_docinfo = read_asset docinfo_path, normalize: true) + content << (apply_subs private_docinfo, docinfo_subs) end end end - - # TODO allow document to control whether extension docinfo is contributed - if @extensions && docinfo_processors?(location) - contentx = @docinfo_processor_extensions[location].map {|candidate| candidate.process_method[self] }.compact * EOL - content = content ? %(#{content}#{EOL}#{contentx}) : contentx - end - - # coerce to string (in case the value is nil) - %(#{content}) end - end - def resolve_docinfo_subs - if @attributes.key? 'docinfosubs' - subs = resolve_subs @attributes['docinfosubs'], :block, nil, 'docinfo' - subs.empty? ? nil : subs + # TODO allow document to control whether extension docinfo is contributed + if @extensions && (docinfo_processors? location) + ((content || []).concat @docinfo_processor_extensions[location].map {|ext| ext.process_method[self] }.compact).join LF + elsif content + content.join LF else - :attributes + '' end end @@ -1192,12 +1084,10 @@ if @docinfo_processor_extensions.key?(location) # false means we already performed a lookup and didn't find any @docinfo_processor_extensions[location] != false + elsif @extensions && @document.extensions.docinfo_processors?(location) + !!(@docinfo_processor_extensions[location] = @document.extensions.docinfo_processors(location)) else - if @extensions && @document.extensions.docinfo_processors?(location) - !!(@docinfo_processor_extensions[location] = @document.extensions.docinfo_processors(location)) - else - @docinfo_processor_extensions[location] = false - end + @docinfo_processor_extensions[location] = false end end @@ -1205,5 +1095,308 @@ %(#<#{self.class}@#{object_id} {doctype: #{doctype.inspect}, doctitle: #{(@header != nil ? 
@header.title : nil).inspect}, blocks: #{@blocks.size}}>) end + private + + # Internal: Apply substitutions to the attribute value + # + # If the value is an inline passthrough macro (e.g., pass:[value]), + # apply the substitutions defined in to the value, or leave the value + # unmodified if no substitutions are specified. If the value is not an + # inline passthrough macro, apply header substitutions to the value. + # + # value - The String attribute value on which to perform substitutions + # + # Returns The String value with substitutions performed + def apply_attribute_value_subs value + if AttributeEntryPassMacroRx =~ value + value = $2 + value = apply_subs value, (resolve_pass_subs $1) if $1 + else + value = apply_header_subs value + end + @max_attribute_value_size ? (limit_bytesize value, @max_attribute_value_size) : value + end + + # Internal: Safely truncates a string to the specified number of bytes. + # + # If a multibyte char gets split, the dangling fragment is dropped. + # + # str - The String the truncate. + # max - The maximum allowable size of the String, in bytes. + # + # Returns the String truncated to the specified bytesize. + def limit_bytesize str, max + if str.bytesize > max + max -= 1 until (str = str.byteslice 0, max).valid_encoding? + end + str + end + + # Internal: Resolve the list of comma-delimited subs to apply to docinfo files. + # + # Resolve the list of substitutions from the value of the docinfosubs + # document attribute, if specified. Otherwise, return an Array containing + # the Symbol :attributes. + # + # Returns an [Array] of substitution [Symbol]s + def resolve_docinfo_subs + (@attributes.key? 'docinfosubs') ? (resolve_subs @attributes['docinfosubs'], :block, nil, 'docinfo') : [:attributes] + end + + # Internal: Create and initialize an instance of the converter for this document + #-- + # QUESTION is there any additional information we should be passing to the converter? + def create_converter backend, delegate_backend + converter_opts = { document: self, htmlsyntax: @attributes['htmlsyntax'] } + if (template_dirs = (opts = @options)[:template_dirs] || opts[:template_dir]) + converter_opts[:template_dirs] = [*template_dirs] + converter_opts[:template_cache] = opts.fetch :template_cache, true + converter_opts[:template_engine] = opts[:template_engine] + converter_opts[:template_engine_options] = opts[:template_engine_options] + converter_opts[:eruby] = opts[:eruby] + converter_opts[:safe] = @safe + converter_opts[:delegate_backend] = delegate_backend if delegate_backend + end + if (converter = opts[:converter]) + (Converter::CustomFactory.new backend => converter).create backend, converter_opts + else + (opts.fetch :converter_factory, Converter).create backend, converter_opts + end + end + + # Internal: Delete any attributes stored for playback + def clear_playback_attributes(attributes) + attributes.delete(:attribute_entries) + end + + # Internal: Branch the attributes so that the original state can be restored + # at a future time. + # + # Returns the duplicated attributes, which will later be restored + def save_attributes + unless ((attrs = @attributes).key? 'doctitle') || !(doctitle_val = doctitle) + attrs['doctitle'] = doctitle_val + end + + # css-signature cannot be updated after header attributes are processed + @id ||= attrs['css-signature'] + + if (toc_val = (attrs.delete 'toc2') ? 
'left' : attrs['toc']) + # toc-placement allows us to separate position from using fitted slot vs macro + toc_position_val = (toc_placement_val = attrs.fetch 'toc-placement', 'macro') && toc_placement_val != 'auto' ? toc_placement_val : attrs['toc-position'] + unless toc_val.empty? && toc_position_val.nil_or_empty? + default_toc_position = 'left' + # TODO rename toc2 to aside-toc + default_toc_class = 'toc2' + position = toc_position_val.nil_or_empty? ? (toc_val.empty? ? default_toc_position : toc_val) : toc_position_val + attrs['toc'] = '' + attrs['toc-placement'] = 'auto' + case position + when 'left', '<', '<' + attrs['toc-position'] = 'left' + when 'right', '>', '>' + attrs['toc-position'] = 'right' + when 'top', '^' + attrs['toc-position'] = 'top' + when 'bottom', 'v' + attrs['toc-position'] = 'bottom' + when 'preamble', 'macro' + attrs['toc-position'] = 'content' + attrs['toc-placement'] = position + default_toc_class = nil + else + attrs.delete 'toc-position' + default_toc_class = nil + end + attrs['toc-class'] ||= default_toc_class if default_toc_class + end + end + + if (icons_val = attrs['icons']) && !(attrs.key? 'icontype') + case icons_val + when '', 'font' + else + attrs['icons'] = '' + attrs['icontype'] = icons_val unless icons_val == 'image' + end + end + + if (@compat_mode = attrs.key? 'compat-mode') + attrs['source-language'] = attrs['language'] if attrs.key? 'language' + end + + unless @parent_document + if (basebackend = attrs['basebackend']) == 'html' + # QUESTION should we allow source-highlighter to be disabled in AsciiDoc table cell? + if (syntax_hl_name = attrs['source-highlighter']) && !attrs[%(#{syntax_hl_name}-unavailable)] + if (syntax_hl_factory = @options[:syntax_highlighter_factory]) + @syntax_highlighter = syntax_hl_factory.create syntax_hl_name, @backend, document: self + elsif (syntax_hls = @options[:syntax_highlighters]) + @syntax_highlighter = (SyntaxHighlighter::DefaultFactoryProxy.new syntax_hls).create syntax_hl_name, @backend, document: self + else + @syntax_highlighter = SyntaxHighlighter.create syntax_hl_name, @backend, document: self + end + end + # enable toc and sectnums (i.e., numbered) by default in DocBook backend + elsif basebackend == 'docbook' + # NOTE the attributes_modified should go away once we have a proper attribute storage & tracking facility + attrs['toc'] = '' unless (attribute_locked? 'toc') || (@attributes_modified.include? 'toc') + attrs['sectnums'] = '' unless (attribute_locked? 'sectnums') || (@attributes_modified.include? 'sectnums') + end + + # NOTE pin the outfilesuffix after the header is parsed + @outfilesuffix = attrs['outfilesuffix'] + + # unfreeze "flexible" attributes + FLEXIBLE_ATTRIBUTES.each do |name| + # turning a flexible attribute off should be permanent + # (we may need more config if that's not always the case) + if @attribute_overrides.key?(name) && @attribute_overrides[name] + @attribute_overrides.delete(name) + end + end + end + + @header_attributes = attrs.merge + end + + # Internal: Assign the local and document datetime attributes, which includes localdate, localyear, localtime, + # localdatetime, docdate, docyear, doctime, and docdatetime. Honor the SOURCE_DATE_EPOCH environment variable, if set. + def fill_datetime_attributes attrs, input_mtime + # See https://reproducible-builds.org/specs/source-date-epoch/ + now = (::ENV.key? 'SOURCE_DATE_EPOCH') ? 
(source_date_epoch = (::Time.at Integer ::ENV['SOURCE_DATE_EPOCH']).utc) : ::Time.now + if (localdate = attrs['localdate']) + attrs['localyear'] ||= (localdate.index '-') == 4 ? (localdate.slice 0, 4) : nil + else + localdate = attrs['localdate'] = now.strftime '%F' + attrs['localyear'] ||= now.year.to_s + end + # %Z is OS dependent and may contain characters that aren't UTF-8 encoded (see asciidoctor#2770 and asciidoctor.js#23) + localtime = (attrs['localtime'] ||= now.strftime %(%T #{now.utc_offset == 0 ? 'UTC' : '%z'})) + attrs['localdatetime'] ||= %(#{localdate} #{localtime}) + # docdate, doctime and docdatetime should default to localdate, localtime and localdatetime if not otherwise set + input_mtime = source_date_epoch || input_mtime || now + if (docdate = attrs['docdate']) + attrs['docyear'] ||= ((docdate.index '-') == 4 ? (docdate.slice 0, 4) : nil) + else + docdate = attrs['docdate'] = input_mtime.strftime '%F' + attrs['docyear'] ||= input_mtime.year.to_s + end + # %Z is OS dependent and may contain characters that aren't UTF-8 encoded (see asciidoctor#2770 and asciidoctor.js#23) + doctime = (attrs['doctime'] ||= input_mtime.strftime %(%T #{input_mtime.utc_offset == 0 ? 'UTC' : '%z'})) + attrs['docdatetime'] ||= %(#{docdate} #{doctime}) + nil + end + + # Internal: Update the backend attributes to reflect a change in the active backend. + # + # This method also handles updating the related doctype attributes if the + # doctype attribute is assigned at the time this method is called. + # + # Returns the resolved String backend if updated, nothing otherwise. + def update_backend_attributes new_backend, init = nil + if init || new_backend != @backend + current_backend = @backend + current_basebackend = (attrs = @attributes)['basebackend'] + current_doctype = @doctype + actual_backend, _, new_backend = new_backend.partition ':' if new_backend.include? ':' + if new_backend.start_with? 'xhtml' + attrs['htmlsyntax'] = 'xml' + new_backend = new_backend.slice 1, new_backend.length + elsif new_backend.start_with? 'html' + attrs['htmlsyntax'] ||= 'html' + end + new_backend = BACKEND_ALIASES[new_backend] || new_backend + new_backend, delegate_backend = actual_backend, new_backend if actual_backend + if current_doctype + if current_backend + attrs.delete %(backend-#{current_backend}) + attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) + end + attrs[%(backend-#{new_backend}-doctype-#{current_doctype})] = '' + attrs[%(doctype-#{current_doctype})] = '' + elsif current_backend + attrs.delete %(backend-#{current_backend}) + end + attrs[%(backend-#{new_backend})] = '' + # QUESTION should we defer the @backend assignment until after the converter is created? + @backend = attrs['backend'] = new_backend + # (re)initialize converter + if Converter::BackendTraits === (converter = create_converter new_backend, delegate_backend) + new_basebackend = converter.basebackend + new_filetype = converter.filetype + if (htmlsyntax = converter.htmlsyntax) + attrs['htmlsyntax'] = htmlsyntax + end + if init + attrs['outfilesuffix'] ||= converter.outfilesuffix + else + attrs['outfilesuffix'] = converter.outfilesuffix unless attribute_locked? 'outfilesuffix' + end + elsif converter + backend_traits = Converter.derive_backend_traits new_backend + new_basebackend = backend_traits[:basebackend] + new_filetype = backend_traits[:filetype] + if init + attrs['outfilesuffix'] ||= backend_traits[:outfilesuffix] + else + attrs['outfilesuffix'] = backend_traits[:outfilesuffix] unless attribute_locked? 
'outfilesuffix' + end + else + # NOTE ideally we shouldn't need the converter before the converter phase, but we do + raise ::NotImplementedError, %(asciidoctor: FAILED: missing converter for backend '#{new_backend}'. Processing aborted.) + end + @converter = converter + if (current_filetype = attrs['filetype']) + attrs.delete %(filetype-#{current_filetype}) + end + attrs['filetype'] = new_filetype + attrs[%(filetype-#{new_filetype})] = '' + if (page_width = DEFAULT_PAGE_WIDTHS[new_basebackend]) + attrs['pagewidth'] = page_width + else + attrs.delete 'pagewidth' + end + if new_basebackend != current_basebackend + if current_doctype + if current_basebackend + attrs.delete %(basebackend-#{current_basebackend}) + attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) + end + attrs[%(basebackend-#{new_basebackend}-doctype-#{current_doctype})] = '' + elsif current_basebackend + attrs.delete %(basebackend-#{current_basebackend}) + end + attrs[%(basebackend-#{new_basebackend})] = '' + attrs['basebackend'] = new_basebackend + end + new_backend + end + end + + # Internal: Update the doctype and backend attributes to reflect a change in the active doctype. + # + # Returns the String doctype if updated, nothing otherwise. + def update_doctype_attributes new_doctype + if new_doctype && new_doctype != @doctype + current_backend, current_basebackend, current_doctype = @backend, (attrs = @attributes)['basebackend'], @doctype + if current_doctype + attrs.delete %(doctype-#{current_doctype}) + if current_backend + attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) + attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' + end + if current_basebackend + attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) + attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' + end + else + attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' if current_backend + attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' if current_basebackend + end + attrs[%(doctype-#{new_doctype})] = '' + return @doctype = attrs['doctype'] = new_doctype + end + end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/extensions.rb asciidoctor-2.0.10/lib/asciidoctor/extensions.rb --- asciidoctor-1.5.5/lib/asciidoctor/extensions.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/extensions.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,6 @@ -# encoding: UTF-8 +# frozen_string_literal: true +(require 'asciidoctor' unless defined? Asciidoctor.load) unless RUBY_ENGINE == 'opal' + module Asciidoctor # Extensions provide a way to participate in the parsing and converting # phases of the AsciiDoc processor or extend the AsciiDoc syntax. @@ -11,7 +13,7 @@ # 2. The Parser parses the block-level content into an abstract syntax tree. # Custom blocks and block macros are processed by associated {BlockProcessor}s # and {BlockMacroProcessor}s, respectively. -# 3. {Treeprocessor}s are run on the abstract syntax tree. +# 3. {TreeProcessor}s are run on the abstract syntax tree. # 4. Conversion of the document begins, at which point inline markup is processed # and converted. Custom inline macros are processed by associated {InlineMacroProcessor}s. # 5. {Postprocessor}s modify or replace the converted document. @@ -54,27 +56,23 @@ config[key] = default_value end - # Include the DSL class for this processor into this processor class or instance. 
+ # Mixes the DSL class for this processor into this processor class or instance. # - # This method automatically detects whether to use the include or extend keyword - # based on what is appropriate. + # This method automatically detects whether to use the include or extend keyword to mix in the module. # # NOTE Inspiration for this DSL design comes from https://corcoran.io/2013/09/04/simple-pattern-ruby-dsl/ # - # Returns nothing - def use_dsl - if self.name.nil_or_empty? - # NOTE contants(false) doesn't exist in Ruby 1.8.7 - #include const_get :DSL if constants(false).grep :DSL - include const_get :DSL if constants.grep :DSL - else - # NOTE contants(false) doesn't exist in Ruby 1.8.7 - #extend const_get :DSL if constants(false).grep :DSL - extend const_get :DSL if constants.grep :DSL + # Returns self + def enable_dsl + if const_defined? :DSL + if singleton_class? + include const_get :DSL + else + extend const_get :DSL + end end end - alias :extend_dsl :use_dsl - alias :include_dsl :use_dsl + alias use_dsl enable_dsl end # Public: Get the configuration Hash for this processor instance. @@ -89,19 +87,123 @@ end def process *args - raise ::NotImplementedError + raise ::NotImplementedError, %(#{Processor} subclass #{self.class} must implement the ##{__method__} method) + end + + # QUESTION should attributes be an option instead of a parameter? + + # Public: Creates a new Section node. + # + # Creates a Section node in the same manner as the parser. + # + # parent - The parent Section (or Document) of this new Section. + # title - The String title of the new Section. + # attrs - A Hash of attributes to control how the section is built. + # Use the style attribute to set the name of a special section (ex. appendix). + # Use the id attribute to assign an explicit ID or set the value to false to + # disable automatic ID generation (when sectids document attribute is set). + # opts - An optional Hash of options (default: {}): + # :level - [Integer] The level to assign to this section; defaults to + # one greater than the parent level (optional). + # :numbered - [Boolean] A flag to force numbering, which falls back to the + # state of the sectnums document attribute (optional). + # + # Returns a [Section] node with all properties properly initialized. + def create_section parent, title, attrs, opts = {} + doc = parent.document + book = (doctype = doc.doctype) == 'book' + level = opts[:level] || parent.level + 1 + if (style = attrs.delete 'style') + if book && style == 'abstract' + sectname, level = 'chapter', 1 + else + sectname, special = style, true + level = 1 if level == 0 + end + elsif book + sectname = level == 0 ? 'part' : (level > 1 ? 'section' : 'chapter') + elsif doctype == 'manpage' && (title.casecmp 'synopsis') == 0 + sectname, special = 'synopsis', true + else + sectname = 'section' + end + sect = Section.new parent, level + sect.title, sect.sectname = title, sectname + if special + sect.special = true + if opts.fetch :numbered, (style == 'appendix') + sect.numbered = true + elsif !(opts.key? :numbered) && (doc.attr? 'sectnums', 'all') + sect.numbered = book && level == 1 ? :chapter : true + end + elsif level > 0 + if opts.fetch :numbered, (doc.attr? 'sectnums') + sect.numbered = sect.special ? parent.numbered && true : true + end + else + sect.numbered = true if opts.fetch :numbered, (book && (doc.attr? 'partnums')) + end + if (id = attrs['id']) == false + attrs.delete 'id' + else + sect.id = attrs['id'] = id || ((doc.attr? 'sectids') ? 
(Section.generate_id sect.title, doc) : nil) + end + sect.update_attributes attrs + sect end def create_block parent, context, source, attrs, opts = {} - Block.new parent, context, { :source => source, :attributes => attrs }.merge(opts) + Block.new parent, context, { source: source, attributes: attrs }.merge(opts) end + # Public: Creates a list node and links it to the specified parent. + # + # parent - The parent Block (Block, Section, or Document) of this new list block. + # context - The list context (e.g., :ulist, :olist, :colist, :dlist) + # attrs - A Hash of attributes to set on this list block + # + # Returns a [List] node with all properties properly initialized. + def create_list parent, context, attrs = nil + list = List.new parent, context + list.update_attributes attrs if attrs + list + end + + # Public: Creates a list item node and links it to the specified parent. + # + # parent - The parent List of this new list item block. + # text - The text of the list item. + # + # Returns a [ListItem] node with all properties properly initialized. + def create_list_item parent, text = nil + ListItem.new parent, text + end + + # Public: Creates an image block node and links it to the specified parent. + # + # parent - The parent Block (Block, Section, or Document) of this new image block. + # attrs - A Hash of attributes to control how the image block is built. + # Use the target attribute to set the source of the image. + # Use the alt attribute to specify an alternative text for the image. + # opts - An optional Hash of options (default: {}) + # + # Returns a [Block] node with all properties properly initialized. def create_image_block parent, attrs, opts = {} - create_block parent, :image, nil, attrs, opts + unless (target = attrs['target']) + raise ::ArgumentError, 'Unable to create an image block, target attribute is required' + end + attrs['alt'] ||= (attrs['default-alt'] = Helpers.basename(target, true).tr('_-', ' ')) + title = (attrs.key? 'title') ? (attrs.delete 'title') : nil + block = create_block parent, :image, nil, attrs, opts + if title + block.title = title + block.assign_caption (attrs.delete 'caption'), 'figure' + end + block end def create_inline parent, context, text, opts = {} - Inline.new parent, context, text, opts + Inline.new parent, context, text, context == :quoted ? ({ type: :unquoted }.merge opts) : opts end # Public: Parses blocks in the content and attaches the block to the parent. @@ -111,13 +213,25 @@ # QUESTION is parse_content the right method name? should we wrap in open block automatically? def parse_content parent, content, attributes = nil reader = Reader === content ? content : (Reader.new content) - while reader.has_more_lines? - block = Parser.next_block reader, parent, (attributes ? attributes.dup : {}) - parent << block if block - end + Parser.parse_blocks reader, parent, attributes parent end + # Public: Parses the attrlist String into a Hash of attributes + # + # block - the current AbstractBlock or the parent AbstractBlock if there is no current block (used for applying subs) + # attrlist - the list of attributes as a String + # opts - an optional Hash of options to control processing: + # :positional_attributes - an Array of attribute names to map positional arguments to (optional, default: false) + # :sub_attributes - enables attribute substitution on the attrlist argument (optional, default: false) + # + # Returns a Hash of parsed attributes + def parse_attributes block, attrlist, opts = {} + return {} if attrlist ? attrlist.empty? 
: true + attrlist = block.sub_attributes attrlist if opts[:sub_attributes] && (attrlist.include? ATTR_REF_HEAD) + (AttributeList.new attrlist).parse (opts[:positional_attributes] || []) + end + # TODO fill out remaining methods [ [:create_paragraph, :create_block, :paragraph], @@ -126,10 +240,12 @@ [:create_pass_block, :create_block, :pass], [:create_listing_block, :create_block, :listing], [:create_literal_block, :create_block, :literal], - [:create_anchor, :create_inline, :anchor] + [:create_anchor, :create_inline, :anchor], + [:create_inline_pass, :create_inline, :quoted], ].each do |method_name, delegate_method_name, context| define_method method_name do |*args| - send delegate_method_name, *args.dup.insert(1, context) + args.unshift args.shift, context + send delegate_method_name, *args end end end @@ -143,63 +259,157 @@ end def process *args, &block - # need to check for both block/proc and lambda - # TODO need test for this! - #if block_given? || (args.size == 1 && ::Proc === (block = args[0])) if block_given? + raise ::ArgumentError, %(wrong number of arguments (given #{args.size}, expected 0)) unless args.empty? + unless block.binding && self == block.binding.receiver + # NOTE remap self in process method to processor instance + context = self + block.define_singleton_method(:call) {|*m_args| context.instance_exec(*m_args, &block) } + end @process_block = block - elsif @process_block - # NOTE Proc automatically expands a single array argument - # ...but lambda doesn't (and we want to accept lambdas too) - # TODO need a test for this! + # TODO enable if we want to support passing proc or lambda as argument instead of block + #elsif ::Proc === args[0] + # raise ::ArgumentError, %(wrong number of arguments (given #{args.size - 1}, expected 0)) unless args.size == 1 + # @process_block = args.shift + elsif defined? @process_block @process_block.call(*args) else - raise ::NotImplementedError + raise ::NotImplementedError, %(#{self.class} ##{__method__} method called before being registered) end end - #alias :process_with :process def process_block_given? defined? @process_block end end + module DocumentProcessorDsl + include ProcessorDsl + + def prefer + option :position, :>> + end + end + + module SyntaxProcessorDsl + include ProcessorDsl + + def named value + # NOTE due to how processors get initialized, we must defer this assignment in some scenarios + if Processor === self + @name = value + else + option :name, value + end + end + + def content_model value + option :content_model, value + end + alias parse_content_as content_model + + def positional_attributes *value + option :positional_attrs, value.flatten + end + alias name_positional_attributes positional_attributes + # NOTE positional_attrs alias is deprecated + alias positional_attrs positional_attributes + + def default_attributes value + option :default_attrs, value + end + # NOTE default_attrs alias is deprecated + alias default_attrs default_attributes + + def resolve_attributes *args + # NOTE assume true as default value; rewrap single-argument string or symbol + if (args = args.fetch 0, true).respond_to? :to_sym + args = [args] + end unless args.size > 1 + case args + when true + option :positional_attrs, [] + option :default_attrs, {} + when ::Array + names, defaults = [], {} + args.each do |arg| + if (arg = arg.to_s).include? '=' + name, _, value = arg.partition '=' + if name.include? ':' + idx, _, name = name.partition ':' + idx = idx == '@' ? 
names.size : idx.to_i + names[idx] = name + end + defaults[name] = value + elsif arg.include? ':' + idx, _, name = arg.partition ':' + idx = idx == '@' ? names.size : idx.to_i + names[idx] = name + else + names << arg + end + end + option :positional_attrs, names.compact + option :default_attrs, defaults + when ::Hash + names, defaults = [], {} + args.each do |key, val| + if (name = key.to_s).include? ':' + idx, _, name = name.partition ':' + idx = idx == '@' ? names.size : idx.to_i + names[idx] = name + end + defaults[name] = val if val + end + option :positional_attrs, names.compact + option :default_attrs, defaults + else + raise ::ArgumentError, %(unsupported attributes specification for macro: #{args.inspect}) + end + end + # NOTE resolves_attributes alias is deprecated + alias resolves_attributes resolve_attributes + end + # Public: Preprocessors are run after the source text is split into lines and # normalized, but before parsing begins. # # Prior to invoking the preprocessor, Asciidoctor splits the source text into # lines and normalizes them. The normalize process strips trailing whitespace - # from each line and leaves behind a line-feed character (i.e., "\n"). + # and the end of line character sequence from each line. # - # Asciidoctor passes a reference to the Reader and a copy of the lines Array - # to the {Processor#process} method of an instance of each registered - # Preprocessor. The Preprocessor modifies the Array as necessary and either - # returns a reference to the same Reader or a reference to a new Reader. + # Asciidoctor passes the document and the document's Reader to the + # {Processor#process} method of the Preprocessor instance. The Preprocessor + # can modify the Reader as necessary and either return the same Reader (or + # falsy, which is equivalent) or a reference to a substitute Reader. # # Preprocessor implementations must extend the Preprocessor class. class Preprocessor < Processor def process document, reader - raise ::NotImplementedError + raise ::NotImplementedError, %(#{Preprocessor} subclass #{self.class} must implement the ##{__method__} method) end end - Preprocessor::DSL = ProcessorDsl + Preprocessor::DSL = DocumentProcessorDsl - # Public: Treeprocessors are run on the Document after the source has been + # Public: TreeProcessors are run on the Document after the source has been # parsed into an abstract syntax tree (AST), as represented by the Document # object and its child Node objects (e.g., Section, Block, List, ListItem). # # Asciidoctor invokes the {Processor#process} method on an instance of each - # registered Treeprocessor. + # registered TreeProcessor. # - # Treeprocessor implementations must extend Treeprocessor. + # TreeProcessor implementations must extend TreeProcessor. #-- - # QUESTION should the treeprocessor get invoked after parse header too? - class Treeprocessor < Processor + # QUESTION should the tree processor get invoked after parse header too? + class TreeProcessor < Processor def process document - raise ::NotImplementedError + raise ::NotImplementedError, %(#{TreeProcessor} subclass #{self.class} must implement the ##{__method__} method) end end - Treeprocessor::DSL = ProcessorDsl + TreeProcessor::DSL = DocumentProcessorDsl + + # Alias deprecated class name for backwards compatibility + Treeprocessor = TreeProcessor # Public: Postprocessors are run after the document is converted, but before # it is written to the output stream. @@ -218,10 +428,10 @@ # Postprocessor implementations must Postprocessor. 
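For readers tracking the Treeprocessor-to-TreeProcessor rename, here is a minimal sketch of a tree processor registered through the block DSL described above; the listing-block transform it performs is purely illustrative and not part of this patch:

    Asciidoctor::Extensions.register do
      tree_processor do
        process do |document|
          # walk the parsed tree and tag every listing block (illustrative transform only)
          (document.find_by context: :listing).each {|block| block.set_attr 'linenums', '' }
          nil
        end
      end
    end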
class Postprocessor < Processor def process document, output - raise ::NotImplementedError + raise ::NotImplementedError, %(#{Postprocessor} subclass #{self.class} must implement the ##{__method__} method) end end - Postprocessor::DSL = ProcessorDsl + Postprocessor::DSL = DocumentProcessorDsl # Public: IncludeProcessors are used to process `include::[]` # directives in the source document. @@ -233,17 +443,37 @@ # # IncludeProcessor implementations must extend IncludeProcessor. #-- - # TODO add file extension or regexp to shortcut handles? + # TODO add file extension or regexp as shortcut for handles? method class IncludeProcessor < Processor def process document, reader, target, attributes - raise ::NotImplementedError + raise ::NotImplementedError, %(#{IncludeProcessor} subclass #{self.class} must implement the ##{__method__} method) end def handles? target true end end - IncludeProcessor::DSL = ProcessorDsl + + module IncludeProcessorDsl + include DocumentProcessorDsl + + def handles? *args, &block + if block_given? + raise ::ArgumentError, %(wrong number of arguments (given #{args.size}, expected 0)) unless args.empty? + @handles_block = block + # TODO enable if we want to support passing proc or lambda as argument instead of block + #elsif ::Proc === args[0] + # block = args.shift + # raise ::ArgumentError, %(wrong number of arguments (given #{args.size}, expected 0)) unless args.empty? + # @handles_block = block + elsif defined? @handles_block + @handles_block.call args[0] + else + true + end + end + end + IncludeProcessor::DSL = IncludeProcessorDsl # Public: DocinfoProcessors are used to add additional content to # the header and/or footer of the generated document. @@ -254,20 +484,18 @@ # If a location is not specified, the DocinfoProcessor is assumed # to add content to the header. class DocinfoProcessor < Processor - attr_accessor :location - def initialize config = {} super config @config[:location] ||= :head end def process document - raise ::NotImplementedError + raise ::NotImplementedError, %(#{DocinfoProcessor} subclass #{self.class} must implement the ##{__method__} method) end end module DocinfoProcessorDsl - include ProcessorDsl + include DocumentProcessorDsl def at_location value option :location, value @@ -281,7 +509,7 @@ # When Asciidoctor encounters a delimited block or paragraph with an # unrecognized name while parsing the document, it looks for a BlockProcessor # registered to handle this name and, if found, invokes its {Processor#process} - # method to build a cooresponding node in the document tree. + # method to build a corresponding node in the document tree. # # AsciiDoc example: # @@ -293,7 +521,8 @@ # * :named - The name of the block (required: true) # * :contexts - The blocks contexts on which this style can be used (default: [:paragraph, :open] # * :content_model - The structure of the content supported in this block (default: :compound) - # * :positional_attributes - A list of attribute names used to map positional attributes (default: nil) + # * :positional_attrs - A list of attribute names used to map positional attributes (default: nil) + # * :default_attrs - A hash of attribute names and values used to seed the attributes hash (default: nil) # * ... # # BlockProcessor implementations must extend BlockProcessor. 
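Before the next hunk, a minimal sketch of a block extension wired up with the DSL methods documented above (named, on_context, parse_content_as, process); the :shout name and the upcasing behavior are illustrative only:

    Asciidoctor::Extensions.register do
      block do
        named :shout
        on_context :paragraph
        parse_content_as :simple
        process do |parent, reader, attrs|
          # read the block body and emit it back as an upcased paragraph
          create_paragraph parent, (reader.lines.map {|line| line.upcase }), attrs
        end
      end
    end

A paragraph styled with [shout] in a document would then be routed through this processor.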
@@ -317,46 +546,19 @@ end def process parent, reader, attributes - raise ::NotImplementedError + raise ::NotImplementedError, %(#{BlockProcessor} subclass #{self.class} must implement the ##{__method__} method) end end module BlockProcessorDsl - include ProcessorDsl - - # FIXME this isn't the prettiest thing - def named value - if Processor === self - @name = value - else - option :name, value - end - end - alias :match_name :named - alias :bind_to :named + include SyntaxProcessorDsl def contexts *value - option :contexts, value.flatten - end - alias :on_contexts :contexts - alias :on_context :contexts - - def content_model value - option :content_model, value - end - alias :parse_content_as :content_model - - def positional_attributes *value - option :pos_attrs, value.flatten + option :contexts, value.flatten.to_set end - alias :pos_attrs :positional_attributes - alias :name_attributes :positional_attributes - alias :name_positional_attributes :positional_attributes - - def default_attrs value - option :default_attrs, value - end - alias :seed_attributes_with :default_attrs + alias on_contexts contexts + alias on_context contexts + alias bind_to contexts end BlockProcessor::DSL = BlockProcessorDsl @@ -370,40 +572,23 @@ end def process parent, target, attributes - raise ::NotImplementedError + raise ::NotImplementedError, %(#{MacroProcessor} subclass #{self.class} must implement the ##{__method__} method) end end module MacroProcessorDsl - include ProcessorDsl - # QUESTION perhaps include a SyntaxDsl? + include SyntaxProcessorDsl - def named value - if Processor === self - @name = value + def resolve_attributes *args + if args.size == 1 && !args[0] + option :content_model, :text else - option :name, value + super + option :content_model, :attributes end end - alias :match_name :named - alias :bind_to :named - - def content_model value - option :content_model, value - end - alias :parse_content_as :content_model - - def positional_attributes *value - option :pos_attrs, value.flatten - end - alias :pos_attrs :positional_attributes - alias :name_attributes :positional_attributes - alias :name_positional_attributes :positional_attributes - - def default_attrs value - option :default_attrs, value - end - alias :seed_attributes_with :default_attrs + # NOTE resolves_attributes alias is deprecated + alias resolves_attributes resolve_attributes end # Public: BlockMacroProcessors are used to handle block macros that have a @@ -411,6 +596,10 @@ # # BlockMacroProcessor implementations must extend BlockMacroProcessor. class BlockMacroProcessor < MacroProcessor + def name + raise ::ArgumentError, %(invalid name for block macro: #{@name}) unless MacroNameRx.match? @name.to_s + @name + end end BlockMacroProcessor::DSL = MacroProcessorDsl @@ -420,31 +609,32 @@ # InlineMacroProcessor implementations must extend InlineMacroProcessor. #-- # TODO break this out into different pattern types - # for example, FormalInlineMacro, ShortInlineMacro (no target) and other patterns + # for example, FullInlineMacro, ShortInlineMacro (no target) and other patterns # FIXME for inline passthrough, we need to have some way to specify the text as a passthrough class InlineMacroProcessor < MacroProcessor + @@rx_cache = {} + # Lookup the regexp option, resolving it first if necessary. # Once this method is called, the regexp is considered frozen. 
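To illustrate the macro DSL and the name handling covered in this hunk, a hypothetical inline macro registration; the :issue macro name and the example.org URL are placeholders, not part of Asciidoctor:

    Asciidoctor::Extensions.register do
      inline_macro do
        named :issue
        process do |parent, target, attrs|
          # convert issue:123[] into an inline link node (URL is a placeholder)
          create_anchor parent, %(issue ##{target}), type: :link, target: %(https://issues.example.org/#{target})
        end
      end
    end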
def regexp - @config[:regexp] ||= (resolve_regexp @name, @config[:format]) + @config[:regexp] ||= resolve_regexp @name.to_s, @config[:format] end def resolve_regexp name, format - # TODO memoize these regular expressions! - if format == :short - %r(\\?#{name}:\[((?:\\\]|[^\]])*?)\]) - else - %r(\\?#{name}:(\S+?)\[((?:\\\]|[^\]])*?)\]) - end + raise ::ArgumentError, %(invalid name for inline macro: #{name}) unless MacroNameRx.match? name + @@rx_cache[[name, format]] ||= /\\?#{name}:#{format == :short ? '(){0}' : '(\S+?)'}\[(|#{CC_ANY}*?[^\\])\]/ end end module InlineMacroProcessorDsl include MacroProcessorDsl - def using_format value + def format value option :format, value end + alias match_format format + # NOTE using_format alias is deprecated + alias using_format format def match value option :regexp, value @@ -463,9 +653,9 @@ #-- # QUESTION call this ExtensionInfo? class Extension - attr :kind - attr :config - attr :instance + attr_reader :kind + attr_reader :config + attr_reader :instance def initialize kind, instance, config @kind = kind @@ -478,7 +668,7 @@ # reference to the {Processor#process} method. By storing this reference, its # possible to accomodate both concrete extension implementations and Procs. class ProcessorExtension < Extension - attr :process_method + attr_reader :process_method def initialize kind, instance, process_method = nil super kind, instance, instance.config @@ -513,13 +703,12 @@ # Public: Returns the {Asciidoctor::Document} on which the extensions in this registry are being used. attr_reader :document - # Public: Returns the Array of {Group} classes, instances and/or Procs that have been registered. + # Public: Returns the Hash of {Group} classes, instances, and/or Procs that have been registered with this registry. attr_reader :groups def initialize groups = {} @groups = groups - @preprocessor_extensions = @treeprocessor_extensions = @postprocessor_extensions = @include_processor_extensions = @docinfo_processor_extensions =nil - @block_extensions = @block_macro_extensions = @inline_macro_extensions = nil + @preprocessor_extensions = @tree_processor_extensions = @postprocessor_extensions = @include_processor_extensions = @docinfo_processor_extensions = @block_extensions = @block_macro_extensions = @inline_macro_extensions = nil @document = nil end @@ -531,19 +720,21 @@ # Returns the instance of this [Registry]. def activate document @document = document - (Extensions.groups.values + @groups.values).each do |group| - case group - when ::Proc - case group.arity - when 0, -1 - instance_exec(&group) - when 1 - group.call self + unless (ext_groups = Extensions.groups.values + @groups.values).empty? + ext_groups.each do |group| + case group + when ::Proc + case group.arity + when 0, -1 + instance_exec(&group) + when 1 + group.call self + end + when ::Class + group.new.activate self + else + group.activate self end - when ::Class - group.new.activate self - else - group.activate self end end self @@ -575,7 +766,7 @@ # # # as a method block # preprocessor do - # process |reader, lines| + # process do |doc, reader| # ... # end # end @@ -601,58 +792,63 @@ @preprocessor_extensions end - # Public: Registers a {Treeprocessor} with the extension registry to process + # Public: Registers a {TreeProcessor} with the extension registry to process # the AsciiDoc source after parsing is complete. 
# - # The Treeprocessor may be one of four types: + # The TreeProcessor may be one of four types: # - # * A Treeprocessor subclass - # * An instance of a Treeprocessor subclass - # * The String name of a Treeprocessor subclass - # * A method block (i.e., Proc) that conforms to the Treeprocessor contract + # * A TreeProcessor subclass + # * An instance of a TreeProcessor subclass + # * The String name of a TreeProcessor subclass + # * A method block (i.e., Proc) that conforms to the TreeProcessor contract # - # Unless the Treeprocessor is passed as the method block, it must be the + # Unless the TreeProcessor is passed as the method block, it must be the # first argument to this method. # # Examples # - # # as a Treeprocessor subclass - # treeprocessor ShellTreeprocessor + # # as a TreeProcessor subclass + # tree_processor ShellTreeProcessor # - # # as an instance of a Treeprocessor subclass - # treeprocessor ShellTreeprocessor.new + # # as an instance of a TreeProcessor subclass + # tree_processor ShellTreeProcessor.new # - # # as a name of a Treeprocessor subclass - # treeprocessor 'ShellTreeprocessor' + # # as a name of a TreeProcessor subclass + # tree_processor 'ShellTreeProcessor' # # # as a method block - # treeprocessor do - # process |document| + # tree_processor do + # process do |document| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the - # instance of this Treeprocessor. - def treeprocessor *args, &block - add_document_processor :treeprocessor, args, &block + # instance of this TreeProcessor. + def tree_processor *args, &block + add_document_processor :tree_processor, args, &block end - # Public: Checks whether any {Treeprocessor} extensions have been registered. + # Public: Checks whether any {TreeProcessor} extensions have been registered. # - # Returns a [Boolean] indicating whether any Treeprocessor extensions are registered. - def treeprocessors? - !!@treeprocessor_extensions + # Returns a [Boolean] indicating whether any TreeProcessor extensions are registered. + def tree_processors? + !!@tree_processor_extensions end # Public: Retrieves the {Extension} proxy objects for all - # Treeprocessor instances in this registry. + # TreeProcessor instances in this registry. # # Returns an [Array] of Extension proxy objects. - def treeprocessors - @treeprocessor_extensions + def tree_processors + @tree_processor_extensions end + # Alias deprecated methods for backwards compatibility + alias treeprocessor tree_processor + alias treeprocessors? tree_processors? + alias treeprocessors tree_processors + # Public: Registers a {Postprocessor} with the extension registry to process # the output after conversion is complete. # @@ -679,7 +875,7 @@ # # # as a method block # postprocessor do - # process |document, output| + # process do |document, output| # ... # end # end @@ -731,7 +927,7 @@ # # # as a method block # include_processor do - # process |document, output| + # process do |document, output| # ... 
# end # end @@ -776,14 +972,14 @@ # docinfo_processor MetaRobotsDocinfoProcessor # # # as an instance of a DocinfoProcessor subclass with an explicit location - # docinfo_processor JQueryDocinfoProcessor.new, :location => :footer + # docinfo_processor JQueryDocinfoProcessor.new, location: :footer # # # as a name of a DocinfoProcessor subclass # docinfo_processor 'MetaRobotsDocinfoProcessor' # # # as a method block # docinfo_processor do - # process |doc| + # process do |doc| # at_location :footer # 'footer content' # end @@ -872,14 +1068,14 @@ # # as a method block # block do # named :shout - # process |parent, reader, attrs| + # process do |parent, reader, attrs| # ... # end # end # # # as a method block with an explicit block name # block :shout do - # process |parent, reader, attrs| + # process do |parent, reader, attrs| # ... # end # end @@ -961,14 +1157,14 @@ # # as a method block # block_macro do # named :gist - # process |parent, target, attrs| + # process do |parent, target, attrs| # ... # end # end # # # as a method block with an explicit macro name # block_macro :gist do - # process |parent, target, attrs| + # process do |parent, target, attrs| # ... # end # end @@ -1033,7 +1229,7 @@ # inline_macro ChromeInlineMacro # # # as an InlineMacroProcessor subclass with an explicit macro name - # inline_macro ChromeInineMacro, :chrome + # inline_macro ChromeInlineMacro, :chrome # # # as an instance of an InlineMacroProcessor subclass # inline_macro ChromeInlineMacro.new @@ -1045,19 +1241,19 @@ # inline_macro 'ChromeInlineMacro' # # # as a name of an InlineMacroProcessor subclass with an explicit macro name - # inline_macro 'ChromeInineMacro', :chrome + # inline_macro 'ChromeInlineMacro', :chrome # # # as a method block # inline_macro do # named :chrome - # process |parent, target, attrs| + # process do |parent, target, attrs| # ... # end # end # # # as a method block with an explicit macro name # inline_macro :chrome do - # process |parent, target, attrs| + # process do |parent, target, attrs| # ... # end # end @@ -1105,39 +1301,56 @@ @inline_macro_extensions.values end + # Public: Inserts the document processor {Extension} instance as the first + # processor of its kind in the extension registry. + # + # Examples + # + # prefer :include_processor do + # process do |document, reader, target, attrs| + # ... + # end + # end + # + # Returns the [Extension] stored in the registry that proxies the instance + # of this processor. + def prefer *args, &block + extension = ProcessorExtension === (arg0 = args.shift) ? arg0 : (send arg0, *args, &block) + extensions_store = instance_variable_get(%(@#{extension.kind}_extensions).to_sym) + extensions_store.unshift extensions_store.delete extension + extension + end + private def add_document_processor kind, args, &block kind_name = kind.to_s.tr '_', ' ' - kind_class_symbol = kind_name.split(' ').map {|word| %(#{word.chr.upcase}#{word[1..-1]}) }.join.to_sym - kind_class = Extensions.const_get kind_class_symbol - kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol) : nil + kind_class_symbol = kind_name.split.map {|it| it.capitalize }.join.to_sym + kind_class = Extensions.const_get kind_class_symbol, false + kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol, false) : nil kind_store = instance_variable_get(%(@#{kind}_extensions).to_sym) || instance_variable_set(%(@#{kind}_extensions).to_sym, []) # style 1: specified as block extension = if block_given? 
config = resolve_args args, 1 - # TODO if block arity is 0, assume block is process method - processor = kind_class.new config - # NOTE class << processor idiom doesn't work in Opal - #class << processor - # include_dsl - #end - # NOTE kind_class.contants(false) doesn't exist in Ruby 1.8.7 - processor.extend kind_class.const_get :DSL if kind_class.constants.grep :DSL - processor.instance_exec(&block) - processor.freeze + (processor = kind_class.new config).singleton_class.enable_dsl + if block.arity == 0 + processor.instance_exec(&block) + else + yield processor + end unless processor.process_block_given? - raise ::ArgumentError.new %(No block specified to process #{kind_name} extension at #{block.source_location}) + raise ::ArgumentError, %(No block specified to process #{kind_name} extension at #{block.source_location}) end + processor.freeze ProcessorExtension.new kind, processor else processor, config = resolve_args args, 2 - # style 2: specified as class or class name - if ::Class === processor || (::String === processor && (processor = Extensions.class_for_name processor)) - unless processor < kind_class || (kind_java_class && processor < kind_java_class) - raise ::ArgumentError.new %(Invalid type for #{kind_name} extension: #{processor}) + # style 2: specified as Class or String class name + if (processor_class = Helpers.resolve_class processor) + unless processor_class < kind_class || (kind_java_class && processor_class < kind_java_class) + raise ::ArgumentError, %(Invalid type for #{kind_name} extension: #{processor}) end - processor_instance = processor.new config + processor_instance = processor_class.new config processor_instance.freeze ProcessorExtension.new kind, processor_instance # style 3: specified as instance @@ -1146,71 +1359,61 @@ processor.freeze ProcessorExtension.new kind, processor else - raise ::ArgumentError.new %(Invalid arguments specified for registering #{kind_name} extension: #{args}) + raise ::ArgumentError, %(Invalid arguments specified for registering #{kind_name} extension: #{args}) end end - if extension.config[:position] == :>> - kind_store.unshift extension - else - kind_store << extension - end + extension.config[:position] == :>> ? (kind_store.unshift extension) : (kind_store << extension) + extension end def add_syntax_processor kind, args, &block kind_name = kind.to_s.tr '_', ' ' - kind_class_basename = kind_name.split(' ').map {|word| %(#{word.chr.upcase}#{word[1..-1]}) }.join - kind_class_symbol = %(#{kind_class_basename}Processor).to_sym - kind_class = Extensions.const_get kind_class_symbol - kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol) : nil + kind_class_symbol = (kind_name.split.map {|it| it.capitalize } << 'Processor').join.to_sym + kind_class = Extensions.const_get kind_class_symbol, false + kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol, false) : nil kind_store = instance_variable_get(%(@#{kind}_extensions).to_sym) || instance_variable_set(%(@#{kind}_extensions).to_sym, {}) # style 1: specified as block if block_given? 
name, config = resolve_args args, 2 - processor = kind_class.new as_symbol(name), config - # NOTE class << processor idiom doesn't work in Opal - #class << processor - # include_dsl - #end - # NOTE kind_class.contants(false) doesn't exist in Ruby 1.8.7 - processor.extend kind_class.const_get :DSL if kind_class.constants.grep :DSL - if block.arity == 1 - yield processor - else + (processor = kind_class.new (as_symbol name), config).singleton_class.enable_dsl + if block.arity == 0 processor.instance_exec(&block) + else + yield processor end unless (name = as_symbol processor.name) - raise ::ArgumentError.new %(No name specified for #{kind_name} extension at #{block.source_location}) + raise ::ArgumentError, %(No name specified for #{kind_name} extension at #{block.source_location}) end unless processor.process_block_given? - raise ::NoMethodError.new %(No block specified to process #{kind_name} extension at #{block.source_location}) + raise ::NoMethodError, %(No block specified to process #{kind_name} extension at #{block.source_location}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor else processor, name, config = resolve_args args, 3 - # style 2: specified as class or class name - if ::Class === processor || (::String === processor && (processor = Extensions.class_for_name processor)) - unless processor < kind_class || (kind_java_class && processor < kind_java_class) - raise ::ArgumentError.new %(Class specified for #{kind_name} extension does not inherit from #{kind_class}: #{processor}) + # style 2: specified as Class or String class name + if (processor_class = Helpers.resolve_class processor) + unless processor_class < kind_class || (kind_java_class && processor_class < kind_java_class) + raise ::ArgumentError, %(Class specified for #{kind_name} extension does not inherit from #{kind_class}: #{processor}) end - processor_instance = processor.new as_symbol(name), config + processor_instance = processor_class.new as_symbol(name), config unless (name = as_symbol processor_instance.name) - raise ::ArgumentError.new %(No name specified for #{kind_name} extension: #{processor}) + raise ::ArgumentError, %(No name specified for #{kind_name} extension: #{processor}) end - processor.freeze + processor_instance.freeze kind_store[name] = ProcessorExtension.new kind, processor_instance # style 3: specified as instance elsif kind_class === processor || (kind_java_class && kind_java_class === processor) processor.update_config config # TODO need a test for this override! unless (name = name ? (processor.name = as_symbol name) : (as_symbol processor.name)) - raise ::ArgumentError.new %(No name specified for #{kind_name} extension: #{processor}) + raise ::ArgumentError, %(No name specified for #{kind_name} extension: #{processor}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor else - raise ::ArgumentError.new %(Invalid arguments specified for registering #{kind_name} extension: #{args}) + raise ::ArgumentError, %(Invalid arguments specified for registering #{kind_name} extension: #{args}) end end end @@ -1218,9 +1421,8 @@ def resolve_args args, expect opts = ::Hash === args[-1] ? 
args.pop : {} return opts if expect == 1 - num_args = args.size - if (missing = expect - 1 - num_args) > 0 - args.fill nil, num_args, missing + if (missing = expect - 1 - args.size) > 0 + args += (::Array.new missing) elsif missing < 0 args.pop(-missing) end @@ -1247,10 +1449,9 @@ @groups ||= {} end - def build_registry name = nil, &block + def create name = nil, &block if block_given? - name ||= generate_name - Registry.new({ name => block }) + Registry.new (name || generate_name) => block else Registry.new end @@ -1291,58 +1492,39 @@ # # Returns the [Proc, Class or Object] instance, matching the type passed to this method. def register *args, &block - argc = args.length - resolved_group = if block_given? - block - elsif !(group = args.pop) - raise ::ArgumentError.new %(Extension group to register not specified) + argc = args.size + if block_given? + resolved_group = block + elsif (group = args.pop) + # QUESTION should we instantiate the group class here or defer until activation?? + resolved_group = (Helpers.resolve_class group) || group else - # QUESTION should we instantiate the group class here or defer until - # activation?? - case group - when ::Class - group - when ::String - class_for_name group - when ::Symbol - class_for_name group.to_s - else - group - end + raise ::ArgumentError, %(Extension group to register not specified) end name = args.pop || generate_name unless args.empty? - raise ::ArgumentError.new %(Wrong number of arguments (#{argc} for 1..2)) + raise ::ArgumentError, %(Wrong number of arguments (#{argc} for 1..2)) end - groups[name] = resolved_group + groups[name.to_sym] = resolved_group end + # Public: Unregister all statically-registered extension groups. + # + # Returns nothing def unregister_all @groups = {} + nil end - # unused atm, but tested - def resolve_class object - ::Class === object ? object : (class_for_name object.to_s) - end - - # Public: Resolves the Class object for the qualified name. - # - # Returns Class - def class_for_name qualified_name - resolved_class = ::Object - qualified_name.split('::').each do |name| - if name.empty? - # do nothing - elsif resolved_class.const_defined? name - resolved_class = resolved_class.const_get name - else - raise %(Could not resolve class for name: #{qualified_name}) - end - end - resolved_class + # Public: Unregister statically-registered extension groups by name. + # + # names - one or more Symbol or String group names to unregister + # + # Returns nothing + def unregister *names + names.each {|group| @groups.delete group.to_sym } + nil end end - end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/helpers.rb asciidoctor-2.0.10/lib/asciidoctor/helpers.rb --- asciidoctor-1.5.5/lib/asciidoctor/helpers.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/helpers.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,14 +1,17 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor +# Internal: Except where noted, a module that contains internal helper functions. module Helpers - # Internal: Require the specified library using Kernel#require. + module_function + + # Public: Require the specified library using Kernel#require. # # Attempts to load the library specified in the first argument using the # Kernel#require. 
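Tying the Registry registration and unregistration changes above together, a brief usage sketch; the :sample group name and the no-op postprocessor exist only for illustration:

    Asciidoctor::Extensions.register :sample do
      postprocessor do
        process {|document, output| output }
      end
    end
    Asciidoctor::Extensions.unregister :sample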
Rescues the LoadError if the library is not available and - # passes a message to Kernel#fail if on_failure is :abort or Kernel#warn if + # passes a message to Kernel#raise if on_failure is :abort or Kernel#warn if # on_failure is :warn to communicate to the user that processing is being # aborted or functionality is disabled, respectively. If a gem_name is - # specified, the message communicates that a required gem is not installed. + # specified, the message communicates that a required gem is not available. # # name - the String name of the library to require. # gem_name - a Boolean that indicates whether this library is provided by a RubyGem, @@ -16,115 +19,92 @@ # (default: true) # on_failure - a Symbol that indicates how to handle a load failure (:abort, :warn, :ignore) (default: :abort) # - # returns The return value of Kernel#require if the library is available and can be, or was previously, loaded. - # Otherwise, Kernel#fail is called with an appropriate message if on_failure is :abort. - # Otherwise, Kernel#warn is called with an appropriate message and nil returned if on_failure is :warn. + # Returns The [Boolean] return value of Kernel#require if the library can be loaded. + # Otherwise, if on_failure is :abort, Kernel#raise is called with an appropriate message. + # Otherwise, if on_failure is :warn, Kernel#warn is called with an appropriate message and nil returned. # Otherwise, nil is returned. - def self.require_library name, gem_name = true, on_failure = :abort + def require_library name, gem_name = true, on_failure = :abort require name - rescue ::LoadError => e + rescue ::LoadError + include Logging unless include? Logging if gem_name gem_name = name if gem_name == true case on_failure when :abort - fail %(asciidoctor: FAILED: required gem '#{gem_name}' is not installed. Processing aborted.) + details = $!.path == gem_name ? '' : %[ (reason: #{$!.path ? %(cannot load '#{$!.path}') : $!.message})] + raise ::LoadError, %(asciidoctor: FAILED: required gem '#{gem_name}' is not available#{details}. Processing aborted.) when :warn - warn %(asciidoctor: WARNING: optional gem '#{gem_name}' is not installed. Functionality disabled.) + details = $!.path == gem_name ? '' : %[ (reason: #{$!.path ? %(cannot load '#{$!.path}') : $!.message})] + logger.warn %(optional gem '#{gem_name}' is not available#{details}. Functionality disabled.) end else case on_failure when :abort - fail %(asciidoctor: FAILED: #{e.message.chomp '.'}. Processing aborted.) + raise ::LoadError, %(asciidoctor: FAILED: #{$!.message.chomp '.'}. Processing aborted.) when :warn - warn %(asciidoctor: WARNING: #{e.message.chomp '.'}. Functionality disabled.) + logger.warn %(#{$!.message.chomp '.'}. Functionality disabled.) end end + nil end - # Public: Normalize the data to prepare for parsing - # - # Delegates to Helpers#normalize_lines_from_string if data is a String. - # Delegates to Helpers#normalize_lines_array if data is a String Array. - # - # returns a String Array of normalized lines - def self.normalize_lines data - data.class == ::String ? (normalize_lines_from_string data) : (normalize_lines_array data) - end - - # Public: Normalize the array of lines to prepare them for parsing + # Internal: Prepare the source data Array for parsing. # - # Force encodes the data to UTF-8 and removes trailing whitespace from each line. + # Encodes the data to UTF-8, if necessary, and removes any trailing + # whitespace from every line. 
# - # If a BOM is present at the beginning of the data, a best attempt - # is made to encode from the specified encoding to UTF-8. + # If a BOM is found at the beginning of the data, a best attempt is made to + # encode it to UTF-8 from the specified source encoding. # - # data - a String Array of lines to normalize + # data - the source data Array to prepare (no nil entries allowed) # - # returns a String Array of normalized lines - def self.normalize_lines_array data + # returns a String Array of prepared lines + def prepare_source_array data return [] if data.empty? - - # NOTE if data encoding is UTF-*, we only need 0..1 - leading_bytes = (first_line = data[0])[0..2].bytes.to_a - if COERCE_ENCODING - utf8 = ::Encoding::UTF_8 - if (leading_2_bytes = leading_bytes[0..1]) == BOM_BYTES_UTF_16LE - # Ruby messes up trailing whitespace on UTF-16LE, so take a different route - return ((data.join.force_encoding ::Encoding::UTF_16LE)[1..-1].encode utf8).lines.map {|line| line.rstrip } - elsif leading_2_bytes == BOM_BYTES_UTF_16BE - data[0] = (first_line.force_encoding ::Encoding::UTF_16BE)[1..-1] - return data.map {|line| "#{((line.force_encoding ::Encoding::UTF_16BE).encode utf8).rstrip}" } - elsif leading_bytes[0..2] == BOM_BYTES_UTF_8 - data[0] = (first_line.force_encoding utf8)[1..-1] - end - - data.map {|line| line.encoding == utf8 ? line.rstrip : (line.force_encoding utf8).rstrip } - else - # Ruby 1.8 has no built-in re-encoding, so no point in removing the UTF-16 BOMs - if leading_bytes == BOM_BYTES_UTF_8 - data[0] = first_line[3..-1] - end + if (leading_2_bytes = (leading_bytes = (first = data[0]).unpack 'C3').slice 0, 2) == BOM_BYTES_UTF_16LE + data[0] = first.byteslice 2, first.bytesize + # NOTE you can't split a UTF-16LE string using .lines when encoding is UTF-8; doing so will cause this line to fail + return data.map {|line| (line.encode UTF_8, ::Encoding::UTF_16LE).rstrip } + elsif leading_2_bytes == BOM_BYTES_UTF_16BE + data[0] = first.byteslice 2, first.bytesize + return data.map {|line| (line.encode UTF_8, ::Encoding::UTF_16BE).rstrip } + elsif leading_bytes == BOM_BYTES_UTF_8 + data[0] = first.byteslice 3, first.bytesize + end + if first.encoding == UTF_8 data.map {|line| line.rstrip } + else + data.map {|line| (line.encode UTF_8).rstrip } end end - # Public: Normalize the String and split into lines to prepare them for parsing + # Internal: Prepare the source data String for parsing. # - # Force encodes the data to UTF-8 and removes trailing whitespace from each line. - # Converts the data to a String Array. + # Encodes the data to UTF-8, if necessary, splits it into an array, and + # removes any trailing whitespace from every line. # - # If a BOM is present at the beginning of the data, a best attempt - # is made to encode from the specified encoding to UTF-8. + # If a BOM is found at the beginning of the data, a best attempt is made to + # encode it to UTF-8 from the specified source encoding. # - # data - a String of lines to normalize + # data - the source data String to prepare # - # returns a String Array of normalized lines - def self.normalize_lines_from_string data + # returns a String Array of prepared lines + def prepare_source_string data return [] if data.nil_or_empty? 
- - if COERCE_ENCODING - utf8 = ::Encoding::UTF_8 - # NOTE if data encoding is UTF-*, we only need 0..1 - leading_bytes = data[0..2].bytes.to_a - if (leading_2_bytes = leading_bytes[0..1]) == BOM_BYTES_UTF_16LE - data = (data.force_encoding ::Encoding::UTF_16LE)[1..-1].encode utf8 - elsif leading_2_bytes == BOM_BYTES_UTF_16BE - data = (data.force_encoding ::Encoding::UTF_16BE)[1..-1].encode utf8 - elsif leading_bytes[0..2] == BOM_BYTES_UTF_8 - data = data.encoding == utf8 ? data[1..-1] : (data.force_encoding utf8)[1..-1] - else - data = data.force_encoding utf8 unless data.encoding == utf8 - end - else - # Ruby 1.8 has no built-in re-encoding, so no point in removing the UTF-16 BOMs - if data[0..2].bytes.to_a == BOM_BYTES_UTF_8 - data = data[3..-1] - end + if (leading_2_bytes = (leading_bytes = data.unpack 'C3').slice 0, 2) == BOM_BYTES_UTF_16LE + data = (data.byteslice 2, data.bytesize).encode UTF_8, ::Encoding::UTF_16LE + elsif leading_2_bytes == BOM_BYTES_UTF_16BE + data = (data.byteslice 2, data.bytesize).encode UTF_8, ::Encoding::UTF_16BE + elsif leading_bytes == BOM_BYTES_UTF_8 + data = data.byteslice 3, data.bytesize + data = data.encode UTF_8 unless data.encoding == UTF_8 + elsif data.encoding != UTF_8 + data = data.encode UTF_8 end - data.each_line.map {|line| line.rstrip } + [].tap {|lines| data.each_line {|line| lines << line.rstrip } } end - # Public: Efficiently checks whether the specified String resembles a URI + # Internal: Efficiently checks whether the specified String resembles a URI # # Uses the Asciidoctor::UriSniffRx regex to check whether the String begins # with a URI prefix (e.g., http://). No validation of the URI is performed. @@ -132,77 +112,188 @@ # str - the String to check # # returns true if the String is a URI, false if it is not - def self.uriish? str - (str.include? ':') && str =~ UriSniffRx + def uriish? str + (str.include? ':') && (UriSniffRx.match? str) end - # Public: Efficiently retrieves the URI prefix of the specified String - # - # Uses the Asciidoctor::UriSniffRx regex to match the URI prefix in the - # specified String (e.g., http://), if present. + # Internal: Encode a URI component String for safe inclusion in a URI. # - # str - the String to check + # str - the URI component String to encode # - # returns the string URI prefix if the string is a URI, otherwise nil - def self.uri_prefix str - (str.include? ':') && str =~ UriSniffRx ? $& : nil + # Returns the String with all reserved URI characters encoded (e.g., /, &, =, space, etc). + if RUBY_ENGINE == 'opal' + def encode_uri_component str + # patch necessary to adhere with RFC-3986 (and thus CGI.escape) + # see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent#Description + %x( + return encodeURIComponent(str).replace(/%20|[!'()*]/g, function (m) { + return m === '%20' ? '+' : '%' + m.charCodeAt(0).toString(16) + }) + ) + end + else + CGI = ::CGI + def encode_uri_component str + CGI.escape str + end end - # Matches the characters in a URI to encode - REGEXP_ENCODE_URI_CHARS = /[^\w\-.!~*';:@=+$,()\[\]]/ - - # Public: Encode a string for inclusion in a URI + # Internal: Apply URI path encoding to spaces in the specified string (i.e., convert spaces to %20). 
# - # str - the string to encode + # str - the String to encode # - # returns an encoded version of the str - def self.encode_uri(str) - str.gsub(REGEXP_ENCODE_URI_CHARS) do - $&.each_byte.map {|c| sprintf '%%%02X', c}.join - end + # Returns the specified String with all spaces replaced with %20. + def encode_spaces_in_uri str + (str.include? ' ') ? (str.gsub ' ', '%20') : str end # Public: Removes the file extension from filename and returns the result # - # file_name - The String file name to process + # filename - The String file name to process; expected to be a posix path # # Examples # - # Helpers.rootname('part1/chapter1.adoc') + # Helpers.rootname 'part1/chapter1.adoc' # # => "part1/chapter1" # # Returns the String filename with the file extension removed - def self.rootname(file_name) - (ext = ::File.extname(file_name)).empty? ? file_name : file_name[0...-ext.length] + def rootname filename + if (last_dot_idx = filename.rindex '.') + (filename.index '/', last_dot_idx) ? filename : (filename.slice 0, last_dot_idx) + else + filename + end end # Public: Retrieves the basename of the filename, optionally removing the extension, if present # - # file_name - The String file name to process - # drop_extname - A Boolean flag indicating whether to drop the extension (default: false) + # filename - The String file name to process. + # drop_ext - A Boolean flag indicating whether to drop the extension + # or an explicit String extension to drop (default: nil). # # Examples # - # Helpers.basename('images/tiger.png', true) + # Helpers.basename 'images/tiger.png', true + # # => "tiger" + # + # Helpers.basename 'images/tiger.png', '.png' # # => "tiger" # # Returns the String filename with leading directories removed and, if specified, the extension removed - def self.basename(file_name, drop_extname = false) - if drop_extname - ::File.basename file_name, (::File.extname file_name) + def basename filename, drop_ext = nil + if drop_ext + ::File.basename filename, (drop_ext == true ? (extname filename) : drop_ext) else - ::File.basename file_name + ::File.basename filename end end - def self.mkdir_p(dir) + # Public: Returns whether this path has a file extension. + # + # path - The path String to check; expects a posix path + # + # Returns true if the path has a file extension, false otherwise + def extname? path + (last_dot_idx = path.rindex '.') && !(path.index '/', last_dot_idx) + end + + # Public: Retrieves the file extension of the specified path. The file extension is the portion of the path in the + # last path segment starting from the last period. + # + # This method differs from File.extname in that it gives us control over the fallback value and is more efficient. + # + # path - The path String in which to look for a file extension + # fallback - The fallback String to return if no file extension is present (optional, default: '') + # + # Returns the String file extension (with the leading dot included) or the fallback value if the path has no file extension. + if ::File::ALT_SEPARATOR + def extname path, fallback = '' + if (last_dot_idx = path.rindex '.') + (path.index '/', last_dot_idx) || (path.index ::File::ALT_SEPARATOR, last_dot_idx) ? fallback : (path.slice last_dot_idx, path.length) + else + fallback + end + end + else + def extname path, fallback = '' + if (last_dot_idx = path.rindex '.') + (path.index '/', last_dot_idx) ? fallback : (path.slice last_dot_idx, path.length) + else + fallback + end + end + end + + # Internal: Make a directory, ensuring all parent directories exist. 
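A few illustrative calls to the path helpers introduced above; the return values follow directly from the definitions shown and are not additional documented behavior:

    Asciidoctor::Helpers.rootname 'part1/chapter1.adoc'    #=> "part1/chapter1"
    Asciidoctor::Helpers.extname 'index.adoc'              #=> ".adoc"
    Asciidoctor::Helpers.extname 'index', '.adoc'          #=> ".adoc" (fallback value)
    Asciidoctor::Helpers.extname? 'archive.tar.gz'         #=> true
    Asciidoctor::Helpers.basename 'images/tiger.png', true #=> "tiger"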
+ def mkdir_p dir unless ::File.directory? dir - parent_dir = ::File.dirname(dir) - if !::File.directory?(parent_dir = ::File.dirname(dir)) && parent_dir != '.' - mkdir_p(parent_dir) + unless (parent_dir = ::File.dirname dir) == '.' + mkdir_p parent_dir + end + begin + ::Dir.mkdir dir + rescue ::SystemCallError + raise unless ::File.directory? dir + end + end + end + + ROMAN_NUMERALS = { + 'M' => 1000, 'CM' => 900, 'D' => 500, 'CD' => 400, 'C' => 100, 'XC' => 90, + 'L' => 50, 'XL' => 40, 'X' => 10, 'IX' => 9, 'V' => 5, 'IV' => 4, 'I' => 1 + } + private_constant :ROMAN_NUMERALS + + # Internal: Converts an integer to a Roman numeral. + # + # val - the [Integer] value to convert + # + # Returns the [String] roman numeral for this integer + def int_to_roman val + ROMAN_NUMERALS.map do |l, i| + repeat, val = val.divmod i + l * repeat + end.join + end + + # Internal: Get the next value in the sequence. + # + # Handles both integer and character sequences. + # + # current - the value to increment as a String or Integer + # + # returns the next value in the sequence according to the current value's type + def nextval current + if ::Integer === current + current + 1 + else + intval = current.to_i + if intval.to_s != current.to_s + (current[0].ord + 1).chr + else + intval + 1 end - ::Dir.mkdir(dir) end end + + # Internal: Resolve the specified object as a Class + # + # object - The Object to resolve as a Class + # + # Returns a Class if the specified object is a Class (but not a Module) or + # a String that resolves to a Class; otherwise, nil + def resolve_class object + ::Class === object ? object : (::String === object ? (class_for_name object) : nil) + end + + # Internal: Resolves a Class object (not a Module) for the qualified name. + # + # Returns Class + def class_for_name qualified_name + raise unless ::Class === (resolved = ::Object.const_get qualified_name, false) + resolved + rescue + raise ::NameError, %(Could not resolve class for name: #{qualified_name}) + end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/inline.rb asciidoctor-2.0.10/lib/asciidoctor/inline.rb --- asciidoctor-1.5.5/lib/asciidoctor/inline.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/inline.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,9 +1,9 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Methods for managing inline elements in AsciiDoc block class Inline < AbstractNode # Public: Get the text of this inline element - attr_reader :text + attr_accessor :text # Public: Get the type (qualifier) of this inline element attr_reader :type @@ -12,18 +12,12 @@ attr_accessor :target def initialize(parent, context, text = nil, opts = {}) - super(parent, context) + super(parent, context, opts) @node_name = %(inline_#{context}) - @text = text - @id = opts[:id] @type = opts[:type] @target = opts[:target] - - unless (more_attributes = opts[:attributes]).nil_or_empty? - update_attributes more_attributes - end end def block? @@ -38,7 +32,42 @@ converter.convert self end - # Alias render to convert to maintain backwards compatibility - alias :render :convert + # Deprecated: Use {Inline#convert} instead. + alias render convert + + # Public: Returns the converted alt text for this inline image. + # + # Returns the [String] value of the alt attribute. + def alt + (attr 'alt') || '' + end + + # For a reference node (:ref or :bibref), the text is the reftext (and the reftext attribute is not set). + # + # (see AbstractNode#reftext?) + def reftext? 
+ @text && (@type == :ref || @type == :bibref) + end + + # For a reference node (:ref or :bibref), the text is the reftext (and the reftext attribute is not set). + # + # (see AbstractNode#reftext) + def reftext + (val = @text) ? (apply_reftext_subs val) : nil + end + + # Public: Generate cross reference text (xreftext) that can be used to refer + # to this inline node. + # + # Use the explicit reftext for this inline node, if specified, retrieved by + # calling the reftext method. Otherwise, returns nil. + # + # xrefstyle - Not currently used (default: nil). + # + # Returns the [String] reftext to refer to this inline node or nothing if no + # reftext is defined. + def xreftext xrefstyle = nil + reftext + end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/list.rb asciidoctor-2.0.10/lib/asciidoctor/list.rb --- asciidoctor-1.5.5/lib/asciidoctor/list.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/list.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,16 +1,16 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Methods for managing AsciiDoc lists (ordered, unordered and description lists) class List < AbstractBlock # Public: Create alias for blocks - alias :items :blocks + alias items blocks # Public: Get the items in this list as an Array - alias :content :blocks + alias content blocks # Public: Create alias to check if this list has blocks - alias :items? :blocks? + alias items? blocks? - def initialize parent, context + def initialize parent, context, opts = {} super end @@ -31,8 +31,8 @@ end end - # Alias render to convert to maintain backwards compatibility - alias :render :convert + # Deprecated: Use {List#convert} instead. + alias render convert def to_s %(#<#{self.class}@#{object_id} {context: #{@context.inspect}, style: #{@style.inspect}, items: #{items.size}}>) @@ -41,10 +41,13 @@ end # Public: Methods for managing items for AsciiDoc olists, ulist, and dlists. +# +# In a description list (dlist), each item is a tuple that consists of a 2-item Array of ListItem terms and a ListItem +# description (i.e., [[term, term, ...], desc]. If a description is not set, then the second entry in the tuple is nil. class ListItem < AbstractBlock # A contextual alias for the list parent node; counterpart to the items alias on List - alias :list :parent + alias list parent # Public: Get/Set the String used to mark this list item attr_accessor :marker @@ -57,14 +60,31 @@ super parent, :list_item @text = text @level = parent.level + @subs = NORMAL_SUBS.drop 0 end + # Public: A convenience method that checks whether the text of this list item + # is not blank (i.e., not nil or empty string). def text? - !@text.nil_or_empty? + @text.nil_or_empty? ? false : true end + # Public: Get the String text of this ListItem with substitutions applied. + # + # By default, normal substitutions are applied to the text. The substitutions + # can be modified by altering the subs property of this object. + # + # Returns the converted String text for this ListItem def text - apply_subs @text + # NOTE @text can be nil if dd node only has block content + @text && (apply_subs @text, @subs) + end + + # Public: Set the String text. + # + # Returns the new String text assigned to this ListItem + def text= val + @text = val end # Check whether this list item has simple content (no nested blocks aside from a single outline list). @@ -83,33 +103,16 @@ !simple? 
end - # Public: Fold the first paragraph block into the text - # - # Here are the rules for when a folding occurs: - # - # Given: this list item has at least one block - # When: the first block is a paragraph that's not connected by a list continuation - # Or: the first block is an indented paragraph that's adjacent (wrapped line) - # Or: the first block is an indented paragraph that's not connected by a list continuation - # Then: then drop the first block and fold it's content (buffer) into the list text + # Internal: Fold the adjacent paragraph block into the list item text # # Returns nothing - def fold_first(continuation_connects_first_block = false, content_adjacent = false) - if (first_block = @blocks[0]) && Block === first_block && - ((first_block.context == :paragraph && !continuation_connects_first_block) || - ((content_adjacent || !continuation_connects_first_block) && first_block.context == :literal && - first_block.option?('listparagraph'))) - - block = blocks.shift - block.lines.unshift @text unless @text.nil_or_empty? - @text = block.source - end + def fold_first + @text = @text.nil_or_empty? ? @blocks.shift.source : %(#{@text}#{LF}#{@blocks.shift.source}) nil end def to_s %(#<#{self.class}@#{object_id} {list_context: #{parent.context.inspect}, text: #{@text.inspect}, blocks: #{(@blocks || []).size}}>) end - end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/load.rb asciidoctor-2.0.10/lib/asciidoctor/load.rb --- asciidoctor-1.5.5/lib/asciidoctor/load.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/load.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,117 @@ +module Asciidoctor + module_function + + # Public: Parse the AsciiDoc source input into a {Document} + # + # Accepts input as an IO (or StringIO), String or String Array object. If the + # input is a File, the object is expected to be opened for reading and is not + # closed afterwards by this method. Information about the file (filename, + # directory name, etc) gets assigned to attributes on the Document object. + # + # input - the AsciiDoc source as a IO, String or Array. + # options - a String, Array or Hash of options to control processing (default: {}) + # String and Array values are converted into a Hash. + # See {Document#initialize} for details about these options. + # + # Returns the Document + def load input, options = {} + options = options.merge + + if (timings = options[:timings]) + timings.start :read + end + + if (logger = options[:logger]) && logger != LoggerManager.logger + LoggerManager.logger = logger + end + + if !(attrs = options[:attributes]) + attrs = {} + elsif ::Hash === attrs + attrs = attrs.merge + elsif (defined? ::Java::JavaUtil::Map) && ::Java::JavaUtil::Map === attrs + attrs = attrs.dup + elsif ::Array === attrs + attrs = {}.tap do |accum| + attrs.each do |entry| + k, _, v = entry.partition '=' + accum[k] = v + end + end + elsif ::String === attrs + # condense and convert non-escaped spaces to null, unescape escaped spaces, then split on null + attrs = {}.tap do |accum| + attrs.gsub(SpaceDelimiterRx, '\1' + NULL).gsub(EscapedSpaceRx, '\1').split(NULL).each do |entry| + k, _, v = entry.partition '=' + accum[k] = v + end + end + elsif (attrs.respond_to? :keys) && (attrs.respond_to? 
:[]) + # coerce attrs to a real Hash + attrs = {}.tap {|accum| attrs.keys.each {|k| accum[k] = attrs[k] } } + else + raise ::ArgumentError, %(illegal type for attributes option: #{attrs.class.ancestors.join ' < '}) + end + + if ::File === input + options[:input_mtime] = input.mtime + # NOTE defer setting infile and indir until we get a better sense of their purpose + # TODO cli checks if input path can be read and is file, but might want to add check to API too + attrs['docfile'] = input_path = ::File.absolute_path input.path + attrs['docdir'] = ::File.dirname input_path + attrs['docname'] = Helpers.basename input_path, (attrs['docfilesuffix'] = Helpers.extname input_path) + source = input.read + elsif input.respond_to? :read + # NOTE tty, pipes & sockets can't be rewound, but can't be sniffed easily either + # just fail the rewind operation silently to handle all cases + input.rewind rescue nil + source = input.read + elsif ::String === input + source = input + elsif ::Array === input + source = input.drop 0 + elsif input + raise ::ArgumentError, %(unsupported input type: #{input.class}) + end + + if timings + timings.record :read + timings.start :parse + end + + options[:attributes] = attrs + doc = options[:parse] == false ? (Document.new source, options) : (Document.new source, options).parse + + timings.record :parse if timings + doc + rescue => ex + begin + context = %(asciidoctor: FAILED: #{attrs['docfile'] || ''}: Failed to load AsciiDoc document) + if ex.respond_to? :exception + # The original message must be explicitly preserved when wrapping a Ruby exception + wrapped_ex = ex.exception %(#{context} - #{ex.message}) + # JRuby automatically sets backtrace; MRI did not until 2.6 + wrapped_ex.set_backtrace ex.backtrace + else + # Likely a Java exception class + wrapped_ex = ex.class.new context, ex + wrapped_ex.stack_trace = ex.stack_trace + end + rescue + wrapped_ex = ex + end + raise wrapped_ex + end + + # Public: Parse the contents of the AsciiDoc source file into an Asciidoctor::Document + # + # input - the String AsciiDoc source filename + # options - a String, Array or Hash of options to control processing (default: {}) + # String and Array values are converted into a Hash. + # See Asciidoctor::Document#initialize for details about options. + # + # Returns the Asciidoctor::Document + def load_file filename, options = {} + ::File.open(filename, FILE_READ_MODE) {|file| load file, options } + end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/logging.rb asciidoctor-2.0.10/lib/asciidoctor/logging.rb --- asciidoctor-1.5.5/lib/asciidoctor/logging.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/logging.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,125 @@ +# frozen_string_literal: true +require 'logger' + +module Asciidoctor +class Logger < ::Logger + attr_reader :max_severity + + def initialize *args + super + self.progname = 'asciidoctor' + self.formatter = BasicFormatter.new + self.level = WARN + end + + def add severity, message = nil, progname = nil + if (severity ||= UNKNOWN) > (@max_severity ||= severity) + @max_severity = severity + end + super + end + + class BasicFormatter < Formatter + SEVERITY_LABELS = { 'WARN' => 'WARNING', 'FATAL' => 'FAILED' } + + def call severity, _, progname, msg + %(#{progname}: #{SEVERITY_LABELS[severity] || severity}: #{::String === msg ? msg : msg.inspect}#{LF}) + end + end + + module AutoFormattingMessage + def inspect + (sloc = self[:source_location]) ? 
%(#{sloc}: #{self[:text]}) : self[:text] + end + end +end + +class MemoryLogger < ::Logger + SEVERITY_LABELS = {}.tap {|accum| (Severity.constants false).each {|c| accum[Severity.const_get c, false] = c } } + + attr_reader :messages + + def initialize + self.level = WARN + @messages = [] + end + + def add severity, message = nil, progname = nil + message = block_given? ? yield : progname unless message + @messages << { severity: SEVERITY_LABELS[severity || UNKNOWN], message: message } + true + end + + def clear + @messages.clear + end + + def empty? + @messages.empty? + end + + def max_severity + empty? ? nil : @messages.map {|m| Severity.const_get m[:severity], false }.max + end +end + +class NullLogger < ::Logger + attr_reader :max_severity + + def initialize + self.level = WARN + end + + def add severity, message = nil, progname = nil + if (severity ||= UNKNOWN) > (@max_severity ||= severity) + @max_severity = severity + end + true + end +end + +module LoggerManager + @logger_class = Logger + class << self + attr_accessor :logger_class + + # NOTE subsequent calls to logger access the logger via the logger property directly + def logger pipe = $stderr + memoize_logger + @logger ||= (@logger_class.new pipe) + end + + def logger= new_logger + @logger = new_logger || (@logger_class.new $stderr) + end + + private + + def memoize_logger + class << self + alias logger logger # suppresses warning from CRuby + attr_reader :logger + end + end + end +end + +module Logging + # Private: Mixes the {Logging} module as static methods into any class that includes the {Logging} module. + # + # into - The Class that includes the {Logging} module + # + # Returns nothing + private_class_method def self.included into + into.extend Logging + end || :included + + def logger + LoggerManager.logger + end + + def message_with_context text, context = {} + ({ text: text }.merge context).extend Logger::AutoFormattingMessage + end +end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/parser.rb asciidoctor-2.0.10/lib/asciidoctor/parser.rb --- asciidoctor-1.5.5/lib/asciidoctor/parser.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/parser.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,6 +1,6 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor -# Public: Methods to parse lines of AsciiDoc into an object hierarchy +# Internal: Methods to parse lines of AsciiDoc into an object hierarchy # representing the structure of the document. All methods are class methods and # should be invoked from the Parser class. The main entry point is ::next_block. # No Parser instances shall be discovered running around. (Any attempt to @@ -23,29 +23,58 @@ # block.class # # => Asciidoctor::Block class Parser + include Logging BlockMatchData = Struct.new :context, :masq, :tip, :terminator - # Regexp for replacing tab character - TabRx = /\t/ + # String for matching tab character + TAB = ?\t # Regexp for leading tab indentation TabIndentRx = /^\t+/ - StartOfBlockProc = lambda {|l| ((l.start_with? '[') && BlockAttributeLineRx =~ l) || (is_delimited_block? l) } + StartOfBlockProc = proc {|l| ((l.start_with? '[') && (BlockAttributeLineRx.match? l)) || (is_delimited_block? l) } - StartOfListProc = lambda {|l| AnyListRx =~ l } + StartOfListProc = proc {|l| AnyListRx.match? l } - StartOfBlockOrListProc = lambda {|l| (is_delimited_block? l) || ((l.start_with? '[') && BlockAttributeLineRx =~ l) || AnyListRx =~ l } + StartOfBlockOrListProc = proc {|l| (is_delimited_block? l) || ((l.start_with? 
'[') && (BlockAttributeLineRx.match? l)) || (AnyListRx.match? l) } NoOp = nil - # Public: Make sure the Parser object doesn't get initialized. + AuthorKeys = ['author', 'authorinitials', 'firstname', 'middlename', 'lastname', 'email'] + + # Internal: A Hash mapping horizontal alignment abbreviations to alignments + # that can be applied to a table cell (or to all cells in a column) + TableCellHorzAlignments = { + '<' => 'left', + '>' => 'right', + '^' => 'center' + } + + # Internal: A Hash mapping vertical alignment abbreviations to alignments + # that can be applied to a table cell (or to all cells in a column) + TableCellVertAlignments = { + '<' => 'top', + '>' => 'bottom', + '^' => 'middle' + } + + # Internal: A Hash mapping styles abbreviations to styles that can be applied + # to a table cell (or to all cells in a column) + TableCellStyles = { + 'd' => :none, + 's' => :strong, + 'e' => :emphasis, + 'm' => :monospaced, + 'h' => :header, + 'l' => :literal, + 'a' => :asciidoc + } + + # Hide the default constructor to make sure this class doesn't get instantiated. # - # Raises RuntimeError if this constructor is invoked. - def initialize - raise 'Au contraire, mon frere. No lexer instances will be running around.' - end + # Raises NoMethodError if an attempt is made to invoke the constructor. + private_class_method :new # Public: Parses AsciiDoc source read from the Reader into the Document # @@ -62,10 +91,14 @@ def self.parse(reader, document, options = {}) block_attributes = parse_document_header(reader, document) + # NOTE don't use a postfix conditional here as it's known to confuse JRuby in certain circumstances unless options[:header_only] while reader.has_more_lines? new_section, block_attributes = next_section(reader, document, block_attributes) - document << new_section if new_section + if new_section + document.assign_numeral new_section + document.blocks << new_section + end end end @@ -86,105 +119,142 @@ # returns the Hash of orphan block attributes captured above the header def self.parse_document_header(reader, document) # capture lines of block-level metadata and plow away comment lines that precede first block - block_attributes = parse_block_metadata_lines(reader, document) + block_attrs = parse_block_metadata_lines reader, document + doc_attrs = document.attributes # special case, block title is not allowed above document title, # carry attributes over to the document body - if (has_doctitle_line = is_next_line_document_title?(reader, block_attributes)) && - block_attributes.has_key?('title') - return document.finalize_header block_attributes, false + if (implicit_doctitle = is_next_line_doctitle? reader, block_attrs, doc_attrs['leveloffset']) && block_attrs['title'] + return document.finalize_header block_attrs, false end # yep, document title logic in AsciiDoc is just insanity # definitely an area for spec refinement - assigned_doctitle = nil - unless (val = document.attributes['doctitle']).nil_or_empty? - document.title = assigned_doctitle = val + + unless (val = doc_attrs['doctitle']).nil_or_empty? 
+ document.title = doctitle_attr_val = val end - section_title = nil # if the first line is the document title, add a header to the document and parse the header metadata - if has_doctitle_line + if implicit_doctitle source_location = reader.cursor if document.sourcemap - document.id, _, doctitle, _, single_line = parse_section_title reader, document - unless assigned_doctitle - document.title = assigned_doctitle = doctitle - end - # default to compat-mode if document uses atx-style doctitle - document.set_attribute 'compat-mode', '' unless single_line - if (separator = block_attributes.delete 'separator') - document.set_attribute 'title-separator', separator + document.id, _, l0_section_title, _, atx = parse_section_title reader, document + if doctitle_attr_val + # NOTE doctitle attribute (set above or below implicit doctitle) overrides implicit doctitle + l0_section_title = nil + else + document.title = l0_section_title + doc_attrs['doctitle'] = doctitle_attr_val = document.apply_header_subs l0_section_title end document.header.source_location = source_location if source_location - document.attributes['doctitle'] = section_title = doctitle - # QUESTION: should the id assignment on Document be encapsulated in the Document class? - if document.id - block_attributes.delete 1 - block_attributes.delete 'id' - else - if (style = block_attributes.delete 1) - style_attrs = { 1 => style } - parse_style_attribute style_attrs, reader - block_attributes['id'] = style_attrs['id'] if style_attrs.key? 'id' - end - document.id = block_attributes.delete 'id' + # default to compat-mode if document has setext doctitle + doc_attrs['compat-mode'] = '' unless atx || (document.attribute_locked? 'compat-mode') + if (separator = block_attrs['separator']) + doc_attrs['title-separator'] = separator unless document.attribute_locked? 'title-separator' end + if (doc_id = block_attrs['id']) + document.id = doc_id + else + doc_id = document.id + end + if (role = block_attrs['role']) + doc_attrs['role'] = role + end + if (reftext = block_attrs['reftext']) + doc_attrs['reftext'] = reftext + end + block_attrs.clear + (modified_attrs = document.instance_variable_get :@attributes_modified).delete 'doctitle' parse_header_metadata reader, document + if modified_attrs.include? 'doctitle' + if (val = doc_attrs['doctitle']).nil_or_empty? || val == doctitle_attr_val + doc_attrs['doctitle'] = doctitle_attr_val + else + document.title = val + end + elsif !l0_section_title + modified_attrs << 'doctitle' + end + document.register :refs, [doc_id, document] if doc_id end - unless (val = document.attributes['doctitle']).nil_or_empty? 
|| val == section_title - document.title = assigned_doctitle = val - end - - # restore doctitle attribute to original assignment - document.attributes['doctitle'] = assigned_doctitle if assigned_doctitle - # parse title and consume name section of manpage document - parse_manpage_header(reader, document) if document.doctype == 'manpage' + parse_manpage_header reader, document, block_attrs if document.doctype == 'manpage' - # NOTE block_attributes are the block-level attributes (not document attributes) that + # NOTE block_attrs are the block-level attributes (not document attributes) that # precede the first line of content (document title, first section or first block) - document.finalize_header block_attributes + document.finalize_header block_attrs end # Public: Parses the manpage header of the AsciiDoc source read from the Reader # # returns Nothing - def self.parse_manpage_header(reader, document) - if (m = ManpageTitleVolnumRx.match(document.attributes['doctitle'])) - document.attributes['mantitle'] = document.sub_attributes(m[1].rstrip.downcase) - document.attributes['manvolnum'] = m[2].strip + def self.parse_manpage_header(reader, document, block_attributes) + if ManpageTitleVolnumRx =~ (doc_attrs = document.attributes)['doctitle'] + doc_attrs['manvolnum'] = manvolnum = $2 + doc_attrs['mantitle'] = (((mantitle = $1).include? ATTR_REF_HEAD) ? (document.sub_attributes mantitle) : mantitle).downcase else - warn %(asciidoctor: ERROR: #{reader.prev_line_info}: malformed manpage title) + logger.error message_with_context 'non-conforming manpage title', source_location: (reader.cursor_at_line 1) # provide sensible fallbacks - document.attributes['mantitle'] = document.attributes['doctitle'] - document.attributes['manvolnum'] = '1' + doc_attrs['mantitle'] = doc_attrs['doctitle'] || doc_attrs['docname'] || 'command' + doc_attrs['manvolnum'] = manvolnum = '1' end - - reader.skip_blank_lines - - if is_next_line_section?(reader, {}) - name_section = initialize_section(reader, document, {}) - if name_section.level == 1 - name_section_buffer = reader.read_lines_until(:break_on_blank_lines => true).join(' ').tr_s(' ', ' ') - if (m = ManpageNamePurposeRx.match(name_section_buffer)) - document.attributes['manname'] = document.sub_attributes m[1] - document.attributes['manpurpose'] = m[2] - # TODO parse multiple man names - - if document.backend == 'manpage' - document.attributes['docname'] = document.attributes['manname'] - document.attributes['outfilesuffix'] = %(.#{document.attributes['manvolnum']}) + if (manname = doc_attrs['manname']) && doc_attrs['manpurpose'] + doc_attrs['manname-title'] ||= 'Name' + doc_attrs['mannames'] = [manname] + if document.backend == 'manpage' + doc_attrs['docname'] = manname + doc_attrs['outfilesuffix'] = %(.#{manvolnum}) + end + else + reader.skip_blank_lines + reader.save + block_attributes.update parse_block_metadata_lines reader, document + if (name_section_level = is_next_line_section? reader, {}) + if name_section_level == 1 + name_section = initialize_section reader, document, {} + name_section_buffer = (reader.read_lines_until break_on_blank_lines: true, skip_line_comments: true).map {|l| l.lstrip }.join ' ' + if ManpageNamePurposeRx =~ name_section_buffer + doc_attrs['manname-title'] ||= name_section.title + doc_attrs['manname-id'] = name_section.id if name_section.id + doc_attrs['manpurpose'] = $2 + if (manname = $1).include? ATTR_REF_HEAD + manname = document.sub_attributes manname + end + if manname.include? 
',' + manname = (mannames = (manname.split ',').map {|n| n.lstrip })[0] + else + mannames = [manname] + end + doc_attrs['manname'] = manname + doc_attrs['mannames'] = mannames + if document.backend == 'manpage' + doc_attrs['docname'] = manname + doc_attrs['outfilesuffix'] = %(.#{manvolnum}) + end + else + error_msg = 'non-conforming name section body' end else - warn %(asciidoctor: ERROR: #{reader.prev_line_info}: malformed name section body) + error_msg = 'name section must be at level 1' end else - warn %(asciidoctor: ERROR: #{reader.prev_line_info}: name section title must be at level 1) + error_msg = 'name section expected' + end + if error_msg + reader.restore_save + logger.error message_with_context error_msg, source_location: reader.cursor + doc_attrs['manname'] = manname = doc_attrs['docname'] || 'command' + doc_attrs['mannames'] = [manname] + if document.backend == 'manpage' + doc_attrs['docname'] = manname + doc_attrs['outfilesuffix'] = %(.#{manvolnum}) + end + else + reader.discard_save end - else - warn %(asciidoctor: ERROR: #{reader.prev_line_info}: name section expected) end + nil end # Public: Return the next section from the Reader. @@ -212,66 +282,55 @@ # source # # => "= Greetings\n\nThis is my doc.\n\n== Salutations\n\nIt is awesome." # - # reader = Reader.new source, nil, :normalize => true + # reader = Reader.new source, nil, normalize: true # # create empty document to parent the section # # and hold attributes extracted from header # doc = Document.new # - # Parser.next_section(reader, doc).first.title + # Parser.next_section(reader, doc)[0].title # # => "Greetings" # - # Parser.next_section(reader, doc).first.title + # Parser.next_section(reader, doc)[0].title # # => "Salutations" # # returns a two-element Array containing the Section and Hash of orphaned attributes - def self.next_section(reader, parent, attributes = {}) - preamble = false - part = false - intro = false - - # FIXME if attributes[1] is a verbatim style, then don't check for section + def self.next_section reader, parent, attributes = {} + preamble = intro = part = false # check if we are at the start of processing the document # NOTE we could drop a hint in the attributes to indicate # that we are at a section title (so we don't have to check) - if parent.context == :document && parent.blocks.empty? && - ((has_header = parent.has_header?) || attributes.delete('invalid-header') || !is_next_line_section?(reader, attributes)) - doctype = parent.doctype - if has_header || (doctype == 'book' && attributes[1] != 'abstract') - preamble = intro = Block.new(parent, :preamble, :content_model => :compound) - if doctype == 'book' && (parent.attr? 'preface-title') - preamble.title = parent.attr 'preface-title' - end - parent << preamble + if parent.context == :document && parent.blocks.empty? && ((has_header = parent.header?) || + (attributes.delete 'invalid-header') || !(is_next_line_section? reader, attributes)) + book = (document = parent).doctype == 'book' + if has_header || (book && attributes[1] != 'abstract') + preamble = intro = Block.new parent, :preamble, content_model: :compound + preamble.title = parent.attr 'preface-title' if book && (parent.attr? 'preface-title') + parent.blocks << preamble end section = parent - current_level = 0 - if parent.attributes.has_key? 'fragment' - expected_next_levels = nil + if parent.attributes.key? 
'fragment' + expected_next_level = -1 # small tweak to allow subsequent level-0 sections for book doctype - elsif doctype == 'book' - expected_next_levels = [0, 1] + elsif book + expected_next_level, expected_next_level_alt = 1, 0 else - expected_next_levels = [1] + expected_next_level = 1 end else - doctype = parent.document.doctype - section = initialize_section(reader, parent, attributes) - # clear attributes, except for title which carries over - # section title to next block of content + book = (document = parent.document).doctype == 'book' + section = initialize_section reader, parent, attributes + # clear attributes except for title attribute, which must be carried over to next content block attributes = (title = attributes['title']) ? { 'title' => title } : {} - current_level = section.level - if current_level == 0 && doctype == 'book' - part = !section.special - # subsections in preface & appendix in multipart books start at level 2 - if section.special && (['preface', 'appendix'].include? section.sectname) - expected_next_levels = [current_level + 2] - else - expected_next_levels = [current_level + 1] + expected_next_level = (current_level = section.level) + 1 + if current_level == 0 + part = book + elsif current_level == 1 && section.special + # NOTE technically preface and abstract sections are only permitted in the book doctype + unless (sectname = section.sectname) == 'appendix' || sectname == 'preface' || sectname == 'abstract' + expected_next_level = nil end - else - expected_next_levels = [current_level + 1] end end @@ -287,32 +346,37 @@ # We have to parse all the metadata lines before continuing with the loop, # otherwise subsequent metadata lines get interpreted as block content while reader.has_more_lines? - parse_block_metadata_lines(reader, section, attributes) - - if (next_level = is_next_line_section? reader, attributes) - next_level += section.document.attr('leveloffset', 0).to_i - if next_level > current_level || (section.context == :document && next_level == 0) - if next_level == 0 && doctype != 'book' - warn %(asciidoctor: ERROR: #{reader.line_info}: only book doctypes can contain level 0 sections) - elsif expected_next_levels && !expected_next_levels.include?(next_level) - warn %(asciidoctor: WARNING: #{reader.line_info}: section title out of sequence: ) + - %(expected #{expected_next_levels.size > 1 ? 'levels' : 'level'} #{expected_next_levels * ' or '}, ) + - %(got level #{next_level}) - end - # the attributes returned are those that are orphaned - new_section, attributes = next_section(reader, section, attributes) - section << new_section - else - if next_level == 0 && doctype != 'book' - warn %(asciidoctor: ERROR: #{reader.line_info}: only book doctypes can contain level 0 sections) + parse_block_metadata_lines reader, document, attributes + if (next_level = is_next_line_section?(reader, attributes)) + if document.attr? 'leveloffset' + next_level += (document.attr 'leveloffset').to_i + next_level = 0 if next_level < 0 + end + if next_level > current_level + if expected_next_level + unless next_level == expected_next_level || (expected_next_level_alt && next_level == expected_next_level_alt) || expected_next_level < 0 + expected_condition = expected_next_level_alt ? 
%(expected levels #{expected_next_level_alt} or #{expected_next_level}) : %(expected level #{expected_next_level}) + logger.warn message_with_context %(section title out of sequence: #{expected_condition}, got level #{next_level}), source_location: reader.cursor + end + else + logger.error message_with_context %(#{sectname} sections do not support nested sections), source_location: reader.cursor end + new_section, attributes = next_section reader, section, attributes + section.assign_numeral new_section + section.blocks << new_section + elsif next_level == 0 && section == document + logger.error message_with_context 'level 0 sections can only be used when doctype is book', source_location: reader.cursor unless book + new_section, attributes = next_section reader, section, attributes + section.assign_numeral new_section + section.blocks << new_section + else # close this section (and break out of the nesting) to begin a new one break end else # just take one block or else we run the risk of overrunning section boundaries - block_line_info = reader.line_info - if (new_block = next_block reader, (intro || section), attributes, :parse_metadata => false) + block_cursor = reader.cursor + if (new_block = next_block reader, intro || section, attributes, parse_metadata: false) # REVIEW this may be doing too much if part if !section.blocks? @@ -324,61 +388,55 @@ new_block.style = 'partintro' # emulate [partintro] open block else - intro = Block.new section, :open, :content_model => :compound + new_block.parent = (intro = Block.new section, :open, content_model: :compound) intro.style = 'partintro' - new_block.parent = intro - section << intro + section.blocks << intro end end elsif section.blocks.size == 1 first_block = section.blocks[0] # open the [partintro] open block for appending if !intro && first_block.content_model == :compound - #new_block.parent = (intro = first_block) - warn %(asciidoctor: ERROR: #{block_line_info}: illegal block content outside of partintro block) + logger.error message_with_context 'illegal block content outside of partintro block', source_location: block_cursor # rebuild [partintro] paragraph as an open block elsif first_block.content_model != :compound - intro = Block.new section, :open, :content_model => :compound + new_block.parent = (intro = Block.new section, :open, content_model: :compound) intro.style = 'partintro' section.blocks.shift if first_block.style == 'partintro' first_block.context = :paragraph first_block.style = nil end - first_block.parent = intro intro << first_block - new_block.parent = intro - section << intro + section.blocks << intro end end end - (intro || section) << new_block - attributes = {} + (intro || section).blocks << new_block + attributes.clear #else # # don't clear attributes if we don't find a block because they may # # be trailing attributes that didn't get associated with a block end end - reader.skip_blank_lines + reader.skip_blank_lines || break end if part unless section.blocks? 
&& section.blocks[-1].context == :section - warn %(asciidoctor: ERROR: #{reader.line_info}: invalid part, must have at least one section (e.g., chapter, appendix, etc.)) + logger.error message_with_context 'invalid part, must have at least one section (e.g., chapter, appendix, etc.)', source_location: reader.cursor end # NOTE we could try to avoid creating a preamble in the first place, though # that would require reworking assumptions in next_section since the preamble # is treated like an untitled section elsif preamble # implies parent == document - document = parent if preamble.blocks? - # unwrap standalone preamble (i.e., no sections), if permissible - if Compliance.unwrap_standalone_preamble && document.blocks.size == 1 && doctype != 'book' + # unwrap standalone preamble (i.e., document has no sections) except for books, if permissible + unless book || document.blocks[1] || !Compliance.unwrap_standalone_preamble document.blocks.shift while (child_block = preamble.blocks.shift) - child_block.parent = document document << child_block end end @@ -392,569 +450,487 @@ # of a section that need to get transfered to the next section # see "trailing block attributes transfer to the following section" in # test/attributes_test.rb for an example - [section != parent ? section : nil, attributes.dup] + [section != parent ? section : nil, attributes.merge] end - # Public: Return the next Section or Block object from the Reader. - # - # Begins by skipping over blank lines to find the start of the next Section - # or Block. Processes each line of the reader in sequence until a Section or - # Block is found or the reader has no more lines. - # - # Uses regular expressions from the Asciidoctor module to match Section - # and Block delimiters. The ensuing lines are then processed according - # to the type of content. + # Public: Parse and return the next Block at the Reader's current location # - # reader - The Reader from which to retrieve the next block - # parent - The Document, Section or Block to which the next block belongs + # This method begins by skipping over blank lines to find the start of the + # next block (paragraph, block macro, or delimited block). If a block is + # found, that block is parsed, initialized as a Block object, and returned. + # Otherwise, the method returns nothing. + # + # Regular expressions from the Asciidoctor module are used to match block + # boundaries. The ensuing lines are then processed according to the content + # model. + # + # reader - The Reader from which to retrieve the next Block. + # parent - The Document, Section or Block to which the next Block belongs. + # attributes - A Hash of attributes that will become the attributes + # associated with the parsed Block (default: {}). + # options - An options Hash to control parsing (default: {}): + # * :text_only indicates that the parser is only looking for text content + # * :list_type indicates this block will be attached to a list item in a list of the specified type # - # Returns a Section or Block object holding the parsed content of the processed lines - #-- - # QUESTION should next_block have an option for whether it should keep looking until - # a block is found? right now it bails when it encounters a line to be skipped + # Returns a Block object built from the parsed content of the processed + # lines, or nothing if no block is found. 
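The next_section walk above is normally driven through the Asciidoctor.load entry point added in load.rb earlier in this patch. A small sketch using the same sample source as the next_section docs; the annotated return values are assumptions for illustration:

    require 'asciidoctor'

    source = "= Greetings\n\nThis is my doc.\n\n== Salutations\n\nIt is awesome."
    doc = Asciidoctor.load source
    doc.doctitle           # => "Greetings"
    doc.blocks[0].context  # => :preamble (the text before the first section)
    doc.sections[0].title  # => "Salutations"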
def self.next_block(reader, parent, attributes = {}, options = {}) - # Skip ahead to the block content - skipped = reader.skip_blank_lines - - # bail if we've reached the end of the parent block or document - return unless reader.has_more_lines? + # skip ahead to the block content; bail if we've reached the end of the reader + return unless (skipped = reader.skip_blank_lines) # check for option to find list item text only # if skipped a line, assume a list continuation was # used and block content is acceptable - if (text_only = options[:text]) && skipped > 0 - options.delete(:text) - text_only = false + if (text_only = options[:text_only]) && skipped > 0 + options.delete :text_only + text_only = nil end - parse_metadata = options.fetch(:parse_metadata, true) - #parse_sections = options.fetch(:parse_sections, false) - document = parent.document + + if options.fetch :parse_metadata, true + # read lines until there are no more metadata lines to read; note that :text_only option impacts parsing rules + while parse_block_metadata_line reader, document, attributes, options + # discard the line just processed + reader.shift + # QUESTION should we clear the attributes? no known cases when it's necessary + reader.skip_blank_lines || return + end + end + if (extensions = document.extensions) - block_extensions = extensions.blocks? - block_macro_extensions = extensions.block_macros? - else - block_extensions = block_macro_extensions = false + block_extensions, block_macro_extensions = extensions.blocks?, extensions.block_macros? end - #parent_context = Block === parent ? parent.context : nil - in_list = ListItem === parent - block = nil - style = nil - explicit_style = nil - sourcemap = document.sourcemap - source_location = nil - - while !block && reader.has_more_lines? - # if parsing metadata, read until there is no more to read - if parse_metadata && parse_block_metadata_line(reader, document, attributes, options) - reader.advance - next - #elsif parse_sections && !parent_context && is_next_line_section?(reader, attributes) - # block, attributes = next_section(reader, parent, attributes) - # break - end - # QUESTION should we introduce a parsing context object? - source_location = reader.cursor if sourcemap - this_line = reader.read_line - delimited_block = false - block_context = nil - cloaked_context = nil - terminator = nil - # QUESTION put this inside call to rekey attributes? - if attributes[1] - style, explicit_style = parse_style_attribute(attributes, reader) - end - - if (delimited_blk_match = is_delimited_block? this_line, true) - delimited_block = true - block_context = cloaked_context = delimited_blk_match.context - terminator = delimited_blk_match.terminator - if !style - style = attributes['style'] = block_context.to_s - elsif style != block_context.to_s - if delimited_blk_match.masq.include? style + # QUESTION should we introduce a parsing context object? + reader.mark + this_line, doc_attrs, style = reader.read_line, document.attributes, attributes[1] + block = block_context = cloaked_context = terminator = nil + + if (delimited_block = is_delimited_block? this_line, true) + block_context = cloaked_context = delimited_block.context + terminator = delimited_block.terminator + if style + unless style == block_context.to_s + if delimited_block.masq.include? 
style block_context = style.to_sym - elsif delimited_blk_match.masq.include?('admonition') && ADMONITION_STYLES.include?(style) + elsif delimited_block.masq.include?('admonition') && ADMONITION_STYLES.include?(style) block_context = :admonition elsif block_extensions && extensions.registered_for_block?(style, block_context) block_context = style.to_sym else - warn %(asciidoctor: WARNING: #{reader.prev_line_info}: invalid style for #{block_context} block: #{style}) + logger.debug message_with_context %(unknown style for #{block_context} block: #{style}), source_location: reader.cursor_at_mark if logger.debug? style = block_context.to_s end end + else + style = attributes['style'] = block_context.to_s end + end - unless delimited_block - - # this loop only executes once; used for flow control - # break once a block is found or at end of loop - # returns nil if the line must be dropped - # Implementation note - while(true) is twice as fast as loop - while true + # this loop is used for flow control; it only executes once, and only when delimited_block is not set + # break once a block is found or at end of loop + # returns nil if the line should be dropped + while true + # process lines verbatim + if style && Compliance.strict_verbatim_paragraphs && (VERBATIM_STYLES.include? style) + block_context = style.to_sym + reader.unshift_line this_line + # advance to block parsing => + break + end - # process lines verbatim - if style && Compliance.strict_verbatim_paragraphs && VERBATIM_STYLES.include?(style) - block_context = style.to_sym - reader.unshift_line this_line - # advance to block parsing => + # process lines normally + if text_only + indented = this_line.start_with? ' ', TAB + else + # NOTE move this declaration up if we need it when text_only is false + md_syntax = Compliance.markdown_syntax + if this_line.start_with? ' ' + indented, ch0 = true, ' ' + # QUESTION should we test line length? + if md_syntax && this_line.lstrip.start_with?(*MARKDOWN_THEMATIC_BREAK_CHARS.keys) && + #!(this_line.start_with? ' ') && + (MarkdownThematicBreakRx.match? this_line) + # NOTE we're letting break lines (horizontal rule, page_break, etc) have attributes + block = Block.new(parent, :thematic_break, content_model: :empty) break end - - # process lines normally - unless text_only - first_char = Compliance.markdown_syntax ? this_line.lstrip.chr : this_line.chr + elsif this_line.start_with? TAB + indented, ch0 = true, TAB + else + indented, ch0 = false, this_line.chr + layout_break_chars = md_syntax ? HYBRID_LAYOUT_BREAK_CHARS : LAYOUT_BREAK_CHARS + if (layout_break_chars.key? ch0) && + (md_syntax ? (ExtLayoutBreakRx.match? this_line) : (uniform? this_line, ch0, (ll = this_line.length)) && ll > 2) # NOTE we're letting break lines (horizontal rule, page_break, etc) have attributes - if (LAYOUT_BREAK_LINES.has_key? first_char) && this_line.length >= 3 && - (Compliance.markdown_syntax ? LayoutBreakLinePlusRx : LayoutBreakLineRx) =~ this_line - block = Block.new(parent, LAYOUT_BREAK_LINES[first_char], :content_model => :empty) - break - - elsif this_line.end_with?(']') && (match = MediaBlockMacroRx.match(this_line)) - blk_ctx = match[1].to_sym - block = Block.new(parent, blk_ctx, :content_model => :empty) - if blk_ctx == :image - posattrs = ['alt', 'width', 'height'] - elsif blk_ctx == :video - posattrs = ['poster', 'width', 'height'] - else - posattrs = [] - end - - # QUESTION why did we make exception for explicit style? 
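The masquerading check above is what lets a delimited example block carry an admonition style. A hedged sketch of the observable result, with an assumed sample document:

    require 'asciidoctor'

    doc = Asciidoctor.load "[NOTE]\n====\nRemember to flush the cache.\n===="
    note = doc.blocks[0]
    note.context      # => :admonition (the example delimiters masquerade as an admonition)
    note.attr 'name'  # => "note"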
- #if style && !explicit_style - if style - attributes['alt'] = style if blk_ctx == :image - attributes.delete 'style' - style = nil + block = Block.new(parent, layout_break_chars[ch0], content_model: :empty) + break + # NOTE very rare that a text-only line will end in ] (e.g., inline macro), so check that first + elsif (this_line.end_with? ']') && (this_line.include? '::') + #if (this_line.start_with? 'image', 'video', 'audio') && BlockMediaMacroRx =~ this_line + if (ch0 == 'i' || (this_line.start_with? 'video:', 'audio:')) && BlockMediaMacroRx =~ this_line + blk_ctx, target, blk_attrs = $1.to_sym, $2, $3 + block = Block.new parent, blk_ctx, content_model: :empty + if blk_attrs + case blk_ctx + when :video + posattrs = ['poster', 'width', 'height'] + when :audio + posattrs = [] + else # :image + posattrs = ['alt', 'width', 'height'] + end + block.parse_attributes blk_attrs, posattrs, sub_input: true, into: attributes end - - block.parse_attributes(match[3], posattrs, - :unescape_input => (blk_ctx == :image), - :sub_input => true, - :sub_result => false, - :into => attributes) - target = block.sub_attributes(match[2], :attribute_missing => 'drop-line') - if target.empty? - # retain as unparsed if attribute-missing is skip - if document.attributes.fetch('attribute-missing', Compliance.attribute_missing) == 'skip' - return Block.new(parent, :paragraph, :content_model => :simple, :source => [this_line]) - # otherwise, drop the line - else + # style doesn't have special meaning for media macros + attributes.delete 'style' if attributes.key? 'style' + if target.include? ATTR_REF_HEAD + if (expanded_target = block.sub_attributes target).empty? && + (doc_attrs['attribute-missing'] || Compliance.attribute_missing) == 'drop-line' && + (block.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? attributes.clear return + else + target = expanded_target + end + end + if blk_ctx == :image + document.register :images, target + attributes['imagesdir'] = doc_attrs['imagesdir'] + # NOTE style is the value of the first positional attribute in the block attribute line + attributes['alt'] ||= style || (attributes['default-alt'] = Helpers.basename(target, true).tr('_-', ' ')) + unless (scaledwidth = attributes.delete 'scaledwidth').nil_or_empty? + # NOTE assume % units if not specified + attributes['scaledwidth'] = (TrailingDigitsRx.match? scaledwidth) ? %(#{scaledwidth}%) : scaledwidth + end + if attributes['title'] + block.title = block_title = attributes.delete 'title' + block.assign_caption (attributes.delete 'caption'), 'figure' end end - attributes['target'] = target - # now done down below - #block.title = attributes.delete('title') if attributes.has_key?('title') - #if blk_ctx == :image - # if attributes.has_key? 'scaledwidth' - # # append % to scaledwidth if ends in number (no units present) - # if (48..57).include?((attributes['scaledwidth'][-1] || 0).ord) - # attributes['scaledwidth'] = %(#{attributes['scaledwidth']}%) - # end - # end - # document.register(:images, target) - # attributes['alt'] ||= Helpers.basename(target, true).tr('_-', ' ') - # # QUESTION should video or audio have an auto-numbered caption? 
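The default-alt handling above derives alt text from the image target when no alt is supplied. A brief sketch; the file name and annotated values are assumptions:

    require 'asciidoctor'

    doc = Asciidoctor.load 'image::sunset_over-water.png[]'
    img = doc.blocks[0]
    img.context     # => :image
    img.attr 'alt'  # => "sunset over water" (target basename with _ and - replaced by spaces)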
- # block.assign_caption attributes.delete('caption'), 'figure' - #end break - # NOTE we're letting the toc macro have attributes - elsif first_char == 't' && (match = TocBlockMacroRx.match(this_line)) - block = Block.new(parent, :toc, :content_model => :empty) - block.parse_attributes(match[1], [], :sub_result => false, :into => attributes) + elsif ch0 == 't' && (this_line.start_with? 'toc:') && BlockTocMacroRx =~ this_line + block = Block.new parent, :toc, content_model: :empty + block.parse_attributes $1, [], into: attributes if $1 break - elsif block_macro_extensions && (match = GenericBlockMacroRx.match(this_line)) && - (extension = extensions.registered_for_block_macro?(match[1])) - target = match[2] - raw_attributes = match[3] - if extension.config[:content_model] == :attributes - unless raw_attributes.empty? - document.parse_attributes(raw_attributes, (extension.config[:pos_attrs] || []), - :sub_input => true, :sub_result => false, :into => attributes) - end + elsif block_macro_extensions ? (CustomBlockMacroRx =~ this_line && + (extension = extensions.registered_for_block_macro? $1) || (report_unknown_block_macro = logger.debug?)) : + (logger.debug? && (report_unknown_block_macro = CustomBlockMacroRx =~ this_line)) + if report_unknown_block_macro + logger.debug message_with_context %(unknown name for block macro: #{$1}), source_location: reader.cursor_at_mark else - attributes['text'] = raw_attributes - end - if (default_attrs = extension.config[:default_attrs]) - default_attrs.each {|k, v| attributes[k] ||= v } - end - if (block = extension.process_method[parent, target, attributes.dup]) - attributes.replace block.attributes - else - attributes.clear - return - end - break - end - end - - # haven't found anything yet, continue - if (match = CalloutListRx.match(this_line)) - block = List.new(parent, :colist) - attributes['style'] = 'arabic' - reader.unshift_line this_line - expected_index = 1 - # NOTE skip the match on the first time through as we've already done it (emulates begin...while) - while match || (reader.has_more_lines? && (match = CalloutListRx.match(reader.peek_line))) - # might want to move this check to a validate method - if match[1].to_i != expected_index - # FIXME this lineno - 2 hack means we need a proper look-behind cursor - warn %(asciidoctor: WARNING: #{reader.path}: line #{reader.lineno - 2}: callout list item index: expected #{expected_index} got #{match[1]}) - end - list_item = next_list_item(reader, block, match) - expected_index += 1 - if list_item - block << list_item - coids = document.callouts.callout_ids(block.items.size) - if !coids.empty? - list_item.attributes['coids'] = coids + content = $3 + if (target = $2).include? ATTR_REF_HEAD + if (expanded_target = parent.sub_attributes target).empty? && + (doc_attrs['attribute-missing'] || Compliance.attribute_missing) == 'drop-line' && + (parent.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? 
+ attributes.clear + return + else + target = expanded_target + end + end + if (ext_config = extension.config)[:content_model] == :attributes + document.parse_attributes content, ext_config[:positional_attrs] || ext_config[:pos_attrs] || [], sub_input: true, into: attributes if content else - # FIXME this lineno - 2 hack means we need a proper look-behind cursor - warn %(asciidoctor: WARNING: #{reader.path}: line #{reader.lineno - 2}: no callouts refer to list item #{block.items.size}) + attributes['text'] = content || '' + end + if (default_attrs = ext_config[:default_attrs]) + attributes.update(default_attrs) {|_, old_v| old_v } + end + if (block = extension.process_method[parent, target, attributes]) + attributes.replace block.attributes + break + else + attributes.clear + return end - end - match = nil - end - - document.callouts.next_list - break - - elsif UnorderedListRx =~ this_line - reader.unshift_line this_line - block = next_outline_list(reader, :ulist, parent) - break - - elsif (match = OrderedListRx.match(this_line)) - reader.unshift_line this_line - block = next_outline_list(reader, :olist, parent) - # TODO move this logic into next_outline_list - if !attributes['style'] && !block.attributes['style'] - marker = block.items[0].marker - if marker.start_with? '.' - # first one makes more sense, but second one is AsciiDoc-compliant - #attributes['style'] = (ORDERED_LIST_STYLES[block.level - 1] || ORDERED_LIST_STYLES[0]).to_s - attributes['style'] = (ORDERED_LIST_STYLES[marker.length - 1] || ORDERED_LIST_STYLES[0]).to_s - else - style = ORDERED_LIST_STYLES.find {|s| OrderedListMarkerRxMap[s] =~ marker } - attributes['style'] = (style || ORDERED_LIST_STYLES[0]).to_s end end - break - - elsif (match = DescriptionListRx.match(this_line)) - reader.unshift_line this_line - block = next_labeled_list(reader, match, parent) - break - - elsif (style == 'float' || style == 'discrete') && - is_section_title?(this_line, (Compliance.underline_style_section_titles ? reader.peek_line(true) : nil)) - reader.unshift_line this_line - float_id, float_reftext, float_title, float_level, _ = parse_section_title(reader, document) - attributes['reftext'] = float_reftext if float_reftext - float_id ||= attributes['id'] if attributes.has_key?('id') - block = Block.new(parent, :floating_title, :content_model => :empty) - if float_id.nil_or_empty? - # FIXME remove hack of creating throwaway Section to get at the generate_id method - tmp_sect = Section.new(parent) - tmp_sect.title = float_title - block.id = tmp_sect.generate_id - else - block.id = float_id - end - block.level = float_level - block.title = float_title - break - - # FIXME create another set for "passthrough" styles - # FIXME make this more DRY! 
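The dispatch above consumes block macros registered through the extensions API. A toy registration, shown only to illustrate how the process callback and attribute handling referenced here fit together; the shout macro is invented for this example:

    require 'asciidoctor'
    require 'asciidoctor/extensions'

    Asciidoctor::Extensions.register do
      block_macro do
        named :shout
        process do |parent, target, attrs|
          # build a plain paragraph from the macro target
          create_paragraph parent, target.upcase, attrs
        end
      end
    end

    Asciidoctor.convert 'shout::hello[]'  # embedded HTML containing a paragraph with "HELLO"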
- elsif style && style != 'normal' - if PARAGRAPH_STYLES.include?(style) - block_context = style.to_sym - cloaked_context = :paragraph - reader.unshift_line this_line - # advance to block parsing => - break - elsif ADMONITION_STYLES.include?(style) - block_context = :admonition - cloaked_context = :paragraph - reader.unshift_line this_line - # advance to block parsing => - break - elsif block_extensions && extensions.registered_for_block?(style, :paragraph) - block_context = style.to_sym - cloaked_context = :paragraph - reader.unshift_line this_line - # advance to block parsing => - break - else - warn %(asciidoctor: WARNING: #{reader.prev_line_info}: invalid style for paragraph: #{style}) - style = nil - # continue to process paragraph - end end + end + end - break_at_list = (skipped == 0 && in_list) - - # a literal paragraph is contiguous lines starting at least one space - if style != 'normal' && LiteralParagraphRx =~ this_line - # So we need to actually include this one in the read_lines group - reader.unshift_line this_line - lines = read_paragraph_lines reader, break_at_list, :skip_line_comments => text_only - - adjust_indentation! lines - - block = Block.new(parent, :literal, :content_model => :verbatim, :source => lines, :attributes => attributes) - # a literal gets special meaning inside of a description list - # TODO this feels hacky, better way to distinguish from explicit literal block? - block.set_option('listparagraph') if in_list + # haven't found anything yet, continue + if !indented && (ch0 ||= this_line.chr) == '<' && CalloutListRx =~ this_line + reader.unshift_line this_line + block = parse_callout_list(reader, $~, parent, document.callouts) + attributes['style'] = 'arabic' + break - # a paragraph is contiguous nonblank/noncontinuation lines - else - reader.unshift_line this_line - lines = read_paragraph_lines reader, break_at_list, :skip_line_comments => true + elsif UnorderedListRx.match? this_line + reader.unshift_line this_line + attributes['style'] = style = 'bibliography' if !style && Section === parent && parent.sectname == 'bibliography' + block = parse_list(reader, :ulist, parent, style) + break - # NOTE we need this logic because we've asked the reader to skip - # line comments, which may leave us w/ an empty buffer if those - # were the only lines found - if lines.empty? - # call advance since the reader preserved the last line - reader.advance - return - end - - catalog_inline_anchors(lines.join(EOL), document) - - first_line = lines[0] - if !text_only && (admonition_match = AdmonitionParagraphRx.match(first_line)) - lines[0] = admonition_match.post_match.lstrip - attributes['style'] = admonition_match[1] - attributes['name'] = admonition_name = admonition_match[1].downcase - attributes['caption'] ||= document.attributes[%(#{admonition_name}-caption)] - block = Block.new(parent, :admonition, :content_model => :simple, :source => lines, :attributes => attributes) - elsif !text_only && Compliance.markdown_syntax && first_line.start_with?('> ') - lines.map! {|line| - if line == '>' - line[1..-1] - elsif line.start_with? '> ' - line[2..-1] - else - line - end - } + elsif OrderedListRx.match? this_line + reader.unshift_line this_line + block = parse_list(reader, :olist, parent, style) + attributes['style'] = block.style if block.style + break - if lines[-1].start_with? '-- ' - attribution, citetitle = lines.pop[3..-1].split(', ', 2) - lines.pop while lines[-1].empty? 
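The parse_list dispatch above surfaces through the public API roughly as follows; the sample source and annotated values are assumptions:

    require 'asciidoctor'

    doc = Asciidoctor.load "* first\n* second\n\n. one\n. two"
    doc.blocks.map {|b| b.context }        # => [:ulist, :olist]
    doc.blocks[0].items.map {|i| i.text }  # => ["first", "second"]
    doc.blocks[1].style                    # => "arabic" (inferred from the . marker)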
- else - attribution, citetitle = nil - end - attributes['style'] = 'quote' - attributes['attribution'] = attribution if attribution - attributes['citetitle'] = citetitle if citetitle - # NOTE will only detect headings that are floating titles (not section titles) - # TODO could assume a floating title when inside a block context - # FIXME Reader needs to be created w/ line info - block = build_block(:quote, :compound, false, parent, Reader.new(lines), attributes) - elsif !text_only && (blockquote? lines, first_line) - lines[0] = first_line[1..-1] - attribution, citetitle = lines.pop[3..-1].split(', ', 2) - lines.pop while lines[-1].empty? - # strip trailing quote - lines[-1] = lines[-1].chop - attributes['style'] = 'quote' - attributes['attribution'] = attribution if attribution - attributes['citetitle'] = citetitle if citetitle - block = Block.new(parent, :quote, :content_model => :simple, :source => lines, :attributes => attributes) - else - # if [normal] is used over an indented paragraph, shift content to left margin - if style == 'normal' - # QUESTION do we even need to shift since whitespace is normalized by XML in this case? - adjust_indentation! lines - end + elsif ((this_line.include? '::') || (this_line.include? ';;')) && DescriptionListRx =~ this_line + reader.unshift_line this_line + block = parse_description_list(reader, $~, parent) + break - block = Block.new(parent, :paragraph, :content_model => :simple, :source => lines, :attributes => attributes) - end - end + elsif (style == 'float' || style == 'discrete') && (Compliance.underline_style_section_titles ? + (is_section_title? this_line, reader.peek_line) : !indented && (atx_section_title? this_line)) + reader.unshift_line this_line + float_id, float_reftext, block_title, float_level = parse_section_title reader, document, attributes['id'] + attributes['reftext'] = float_reftext if float_reftext + block = Block.new(parent, :floating_title, content_model: :empty) + block.title = block_title + attributes.delete 'title' + block.id = float_id || ((doc_attrs.key? 'sectids') ? (Section.generate_id block.title, document) : nil) + block.level = float_level + break - # forbid loop from executing more than once + # FIXME create another set for "passthrough" styles + # FIXME make this more DRY! + elsif style && style != 'normal' + if PARAGRAPH_STYLES.include?(style) + block_context = style.to_sym + cloaked_context = :paragraph + reader.unshift_line this_line + # advance to block parsing => + break + elsif ADMONITION_STYLES.include?(style) + block_context = :admonition + cloaked_context = :paragraph + reader.unshift_line this_line + # advance to block parsing => + break + elsif block_extensions && extensions.registered_for_block?(style, :paragraph) + block_context = style.to_sym + cloaked_context = :paragraph + reader.unshift_line this_line + # advance to block parsing => break + else + logger.debug message_with_context %(unknown style for paragraph: #{style}), source_location: reader.cursor_at_mark if logger.debug? 
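For the discrete heading path above, an assumed example of the resulting block; the generated id relies on the default sectids, idprefix, and idseparator settings:

    require 'asciidoctor'

    doc = Asciidoctor.load "[discrete]\n== Fast Lookups"
    heading = doc.blocks[0]
    heading.context  # => :floating_title
    heading.title    # => "Fast Lookups"
    heading.id       # => "_fast_lookups"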
+ style = nil + # continue to process paragraph end end - # either delimited block or styled paragraph - if !block && block_context - # abstract and partintro should be handled by open block - # FIXME kind of hackish...need to sort out how to generalize this - block_context = :open if block_context == :abstract || block_context == :partintro - - case block_context - when :admonition - attributes['name'] = admonition_name = style.downcase - attributes['caption'] ||= document.attributes[%(#{admonition_name}-caption)] - block = build_block(block_context, :compound, terminator, parent, reader, attributes) - - when :comment - build_block(block_context, :skip, terminator, parent, reader, attributes) - return - - when :example - block = build_block(block_context, :compound, terminator, parent, reader, attributes) - - when :listing, :fenced_code, :source - if block_context == :fenced_code - style = attributes['style'] = 'source' - language, linenums = this_line[3..-1].tr(' ', '').split(',', 2) - if !language.nil_or_empty? - attributes['language'] = language - attributes['linenums'] = '' unless linenums.nil_or_empty? - elsif (default_language = document.attributes['source-language']) - attributes['language'] = default_language - end - if !attributes.key?('indent') && document.attributes.key?('source-indent') - attributes['indent'] = document.attributes['source-indent'] - end - terminator = terminator[0..2] - elsif block_context == :source - AttributeList.rekey(attributes, [nil, 'language', 'linenums']) - unless attributes.key? 'language' - if (default_language = document.attributes['source-language']) - attributes['language'] = default_language - end - end - if !attributes.key?('indent') && document.attributes.key?('source-indent') - attributes['indent'] = document.attributes['source-indent'] + reader.unshift_line this_line + + # a literal paragraph: contiguous lines starting with at least one whitespace character + # NOTE style can only be nil or "normal" at this point + if indented && !style + lines = read_paragraph_lines reader, (content_adjacent = skipped == 0 ? options[:list_type] : nil), skip_line_comments: text_only + adjust_indentation! lines + if text_only || content_adjacent == :dlist + # this block gets folded into the list item text + block = Block.new(parent, :paragraph, content_model: :simple, source: lines, attributes: attributes) + else + block = Block.new(parent, :literal, content_model: :verbatim, source: lines, attributes: attributes) + end + # a normal paragraph: contiguous non-blank/non-continuation lines (left-indented or normal style) + else + lines = read_paragraph_lines reader, skipped == 0 && options[:list_type], skip_line_comments: true + # NOTE don't check indented here since it's extremely rare + #if text_only || indented + if text_only + # if [normal] is used over an indented paragraph, shift content to left margin + # QUESTION do we even need to shift since whitespace is normalized by XML in this case? + adjust_indentation! lines if indented && style == 'normal' + block = Block.new(parent, :paragraph, content_model: :simple, source: lines, attributes: attributes) + elsif (ADMONITION_STYLE_HEADS.include? ch0) && (this_line.include? 
':') && (AdmonitionParagraphRx =~ this_line) + lines[0] = $' # string after match + attributes['name'] = admonition_name = (attributes['style'] = $1).downcase + attributes['textlabel'] = (attributes.delete 'caption') || doc_attrs[%(#{admonition_name}-caption)] + block = Block.new(parent, :admonition, content_model: :simple, source: lines, attributes: attributes) + elsif md_syntax && ch0 == '>' && this_line.start_with?('> ') + lines.map! {|line| line == '>' ? (line.slice 1, line.length) : ((line.start_with? '> ') ? (line.slice 2, line.length) : line) } + if lines[-1].start_with? '-- ' + credit_line = (credit_line = lines.pop).slice 3, credit_line.length + unless lines.empty? + lines.pop while lines[-1].empty? end end - block = build_block(:listing, :verbatim, terminator, parent, reader, attributes) - - when :literal - block = build_block(block_context, :verbatim, terminator, parent, reader, attributes) - - when :pass - block = build_block(block_context, :raw, terminator, parent, reader, attributes) - - when :stem, :latexmath, :asciimath - if block_context == :stem - attributes['style'] = if (explicit_stem_syntax = attributes[2]) - explicit_stem_syntax.include?('tex') ? 'latexmath' : 'asciimath' - elsif (default_stem_syntax = document.attributes['stem']).nil_or_empty? - 'asciimath' + attributes['style'] = 'quote' + # NOTE will only detect discrete (aka free-floating) headings + # TODO could assume a discrete heading when inside a block context + # FIXME Reader needs to be created w/ line info + block = build_block(:quote, :compound, false, parent, Reader.new(lines), attributes) + if credit_line + attribution, citetitle = (block.apply_subs credit_line).split ', ', 2 + attributes['attribution'] = attribution if attribution + attributes['citetitle'] = citetitle if citetitle + end + elsif ch0 == '"' && lines.size > 1 && (lines[-1].start_with? '-- ') && (lines[-2].end_with? '"') + lines[0] = this_line.slice 1, this_line.length # strip leading quote + credit_line = (credit_line = lines.pop).slice 3, credit_line.length + lines.pop while lines[-1].empty? + lines << lines.pop.chop # strip trailing quote + attributes['style'] = 'quote' + block = Block.new(parent, :quote, content_model: :simple, source: lines, attributes: attributes) + attribution, citetitle = (block.apply_subs credit_line).split ', ', 2 + attributes['attribution'] = attribution if attribution + attributes['citetitle'] = citetitle if citetitle + else + # if [normal] is used over an indented paragraph, shift content to left margin + # QUESTION do we even need to shift since whitespace is normalized by XML in this case? + adjust_indentation! lines if indented && style == 'normal' + block = Block.new(parent, :paragraph, content_model: :simple, source: lines, attributes: attributes) + end + + catalog_inline_anchors((lines.join LF), block, document, reader) + end + + break # forbid loop from executing more than once + end unless delimited_block + + # either delimited block or styled paragraph + unless block + case block_context + when :listing, :source + if block_context == :source || (!attributes[1] && (language = attributes[2] || doc_attrs['source-language'])) + if language + attributes['style'] = 'source' + attributes['language'] = language + AttributeList.rekey attributes, [nil, nil, 'linenums'] + else + AttributeList.rekey attributes, [nil, 'language', 'linenums'] + if doc_attrs.key? 'source-language' + attributes['language'] = doc_attrs['source-language'] + end unless attributes.key? 
'language' + end + if attributes['linenums-option'] || doc_attrs['source-linenums-option'] + attributes['linenums'] = '' + end unless attributes.key? 'linenums' + if doc_attrs.key? 'source-indent' + attributes['indent'] = doc_attrs['source-indent'] + end unless attributes.key? 'indent' + end + block = build_block(:listing, :verbatim, terminator, parent, reader, attributes) + when :fenced_code + attributes['style'] = 'source' + if (ll = this_line.length) > 3 + if (comma_idx = (language = this_line.slice 3, ll).index ',') + if comma_idx > 0 + language = (language.slice 0, comma_idx).strip + attributes['linenums'] = '' if comma_idx < ll - 4 else - default_stem_syntax + attributes['linenums'] = '' if ll > 4 end + else + language = language.lstrip end - block = build_block(:stem, :raw, terminator, parent, reader, attributes) - - when :open, :sidebar - block = build_block(block_context, :compound, terminator, parent, reader, attributes) - - when :table - cursor = reader.cursor - block_reader = Reader.new reader.read_lines_until(:terminator => terminator, :skip_line_comments => true), cursor - case terminator.chr - when ',' - attributes['format'] = 'csv' - when ':' - attributes['format'] = 'dsv' - end - block = next_table(block_reader, parent, attributes) - - when :quote, :verse - AttributeList.rekey(attributes, [nil, 'attribution', 'citetitle']) - block = build_block(block_context, (block_context == :verse ? :verbatim : :compound), terminator, parent, reader, attributes) - + end + if language.nil_or_empty? + attributes['language'] = doc_attrs['source-language'] if doc_attrs.key? 'source-language' else - if block_extensions && (extension = extensions.registered_for_block?(block_context, cloaked_context)) - # TODO pass cloaked_context to extension somehow (perhaps a new instance for each cloaked_context?) - if (content_model = extension.config[:content_model]) != :skip - if !(pos_attrs = extension.config[:pos_attrs] || []).empty? - AttributeList.rekey(attributes, [nil].concat(pos_attrs)) - end - if (default_attrs = extension.config[:default_attrs]) - default_attrs.each {|k, v| attributes[k] ||= v } - end + attributes['language'] = language + end + if attributes['linenums-option'] || doc_attrs['source-linenums-option'] + attributes['linenums'] = '' + end unless attributes.key? 'linenums' + if doc_attrs.key? 'source-indent' + attributes['indent'] = doc_attrs['source-indent'] + end unless attributes.key? 'indent' + terminator = terminator.slice 0, 3 + block = build_block(:listing, :verbatim, terminator, parent, reader, attributes) + when :table + block_cursor = reader.cursor + block_reader = Reader.new reader.read_lines_until(terminator: terminator, skip_line_comments: true, context: :table, cursor: :at_mark), block_cursor + # NOTE it's very rare that format is set when using a format hint char, so short-circuit + unless terminator.start_with? '|', '!' + # NOTE infer dsv once all other format hint chars are ruled out + attributes['format'] ||= (terminator.start_with? ',') ? 
'csv' : 'dsv' + end + block = parse_table(block_reader, parent, attributes) + when :sidebar + block = build_block(block_context, :compound, terminator, parent, reader, attributes) + when :admonition + attributes['name'] = admonition_name = style.downcase + attributes['textlabel'] = (attributes.delete 'caption') || doc_attrs[%(#{admonition_name}-caption)] + block = build_block(block_context, :compound, terminator, parent, reader, attributes) + when :open, :abstract, :partintro + block = build_block(:open, :compound, terminator, parent, reader, attributes) + when :literal + block = build_block(block_context, :verbatim, terminator, parent, reader, attributes) + when :example + block = build_block(block_context, :compound, terminator, parent, reader, attributes) + when :quote, :verse + AttributeList.rekey(attributes, [nil, 'attribution', 'citetitle']) + block = build_block(block_context, (block_context == :verse ? :verbatim : :compound), terminator, parent, reader, attributes) + when :stem, :latexmath, :asciimath + attributes['style'] = STEM_TYPE_ALIASES[attributes[2] || doc_attrs['stem']] if block_context == :stem + block = build_block(:stem, :raw, terminator, parent, reader, attributes) + when :pass + block = build_block(block_context, :raw, terminator, parent, reader, attributes) + when :comment + build_block(block_context, :skip, terminator, parent, reader, attributes) + attributes.clear + return + else + if block_extensions && (extension = extensions.registered_for_block? block_context, cloaked_context) + unless (content_model = (ext_config = extension.config)[:content_model]) == :skip + unless (positional_attrs = ext_config[:positional_attrs] || ext_config[:pos_attrs]).nil_or_empty? + AttributeList.rekey(attributes, [nil] + positional_attrs) end - block = build_block block_context, content_model, terminator, parent, reader, attributes, :extension => extension - unless block && content_model != :skip - attributes.clear - return + if (default_attrs = ext_config[:default_attrs]) + default_attrs.each {|k, v| attributes[k] ||= v } end - else - # this should only happen if there's a misconfiguration - raise %(Unsupported block type #{block_context} at #{reader.line_info}) + # QUESTION should we clone the extension for each cloaked context and set in config? + attributes['cloaked-context'] = cloaked_context + end + unless (block = build_block block_context, content_model, terminator, parent, reader, attributes, extension: extension) + attributes.clear + return end + else + # this should only happen if there's a misconfiguration + raise %(Unsupported block type #{block_context} at #{reader.cursor}) end end end - # when looking for nested content, one or more line comments, comment - # blocks or trailing attribute lists could leave us without a block, - # so handle accordingly - # REVIEW we may no longer need this nil check # FIXME we've got to clean this up, it's horrible! - if block - block.source_location = source_location if source_location - # REVIEW seems like there is a better way to organize this wrap-up - block.title = attributes['title'] unless block.title? 
- # FIXME HACK don't hardcode logic for alt, caption and scaledwidth on images down here - if block.context == :image - resolved_target = attributes['target'] - block.document.register(:images, resolved_target) - attributes['alt'] ||= Helpers.basename(resolved_target, true).tr('_-', ' ') - attributes['alt'] = block.sub_specialchars attributes['alt'] - block.assign_caption attributes.delete('caption'), 'figure' - if (scaledwidth = attributes['scaledwidth']) - # append % to scaledwidth if ends in number (no units present) - if (48..57).include?((scaledwidth[-1] || 0).ord) - attributes['scaledwidth'] = %(#{scaledwidth}%) - end - end - else - block.caption ||= attributes.delete('caption') - end - # TODO eventualy remove the style attribute from the attributes hash - #block.style = attributes.delete('style') - block.style = attributes['style'] - # AsciiDoc always use [id] as the reftext in HTML output, - # but I'd like to do better in Asciidoctor - if (block_id = (block.id ||= attributes['id'])) - # TODO sub reftext - document.register(:ids, [block_id, (attributes['reftext'] || (block.title? ? block.title : nil))]) - end - # FIXME remove the need for this update! - block.attributes.update(attributes) unless attributes.empty? - block.lock_in_subs - - #if document.attributes.has_key? :pending_attribute_entries - # document.attributes.delete(:pending_attribute_entries).each do |entry| - # entry.save_to block.attributes - # end - #end - - if block.sub? :callouts - unless (catalog_callouts block.source, document) - # No need to sub callouts if they aren't there - block.remove_sub :callouts - end - end + block.source_location = reader.cursor_at_mark if document.sourcemap + # FIXME title and caption should be assigned when block is constructed (though we need to handle all cases) + if attributes['title'] + block.title = block_title = attributes.delete 'title' + if (caption_attr_name = CAPTION_ATTR_NAMES[block.context]) && document.attributes[caption_attr_name] + block.assign_caption (attributes.delete 'caption') + end + end + # TODO eventually remove the style attribute from the attributes hash + #block.style = attributes.delete 'style' + block.style = attributes['style'] + if (block_id = block.id || (block.id = attributes['id'])) + # convert title to resolve attributes while in scope + block.title if block_title ? (block_title.include? ATTR_REF_HEAD) : block.title? + unless document.register :refs, [block_id, block] + logger.warn message_with_context %(id assigned to block already in use: #{block_id}), source_location: reader.cursor_at_mark + end + end + # FIXME remove the need for this update! + block.update_attributes attributes unless attributes.empty? + block.commit_subs + + #if doc_attrs.key? :pending_attribute_entries + # doc_attrs.delete(:pending_attribute_entries).each do |entry| + # entry.save_to block.attributes + # end + #end + + if block.sub? :callouts + # No need to sub callouts if none are found when cataloging + block.remove_sub :callouts unless catalog_callouts block.source, document end block end - def self.blockquote? lines, first_line = nil - lines.size > 1 && ((first_line || lines[0]).start_with? '"') && - (lines[-1].start_with? '-- ') && (lines[-2].end_with? 
'"') - end - def self.read_paragraph_lines reader, break_at_list, opts = {} opts[:break_on_blank_lines] = true opts[:break_on_list_continuation] = true @@ -965,74 +941,49 @@ reader.read_lines_until opts, &break_condition end - # Public: Determines whether this line is the start of any of the delimited blocks + # Public: Determines whether this line is the start of a known delimited block. # - # returns the match data if this line is the first line of a delimited block or nil if not - def self.is_delimited_block? line, return_match_data = false + # Returns the BlockMatchData (if return_match_data is true) or true (if return_match_data is false) if this line is + # the start of a delimited block, otherwise nothing. + def self.is_delimited_block? line, return_match_data = nil # highly optimized for best performance - return unless (line_len = line.length) > 1 && (DELIMITED_BLOCK_LEADERS.include? line[0..1]) - # catches open block + return unless (line_len = line.length) > 1 && DELIMITED_BLOCK_HEADS[line.slice 0, 2] + # open block if line_len == 2 tip = line - tl = 2 + tip_len = 2 else - # catches all other delimited blocks, including fenced code - if line_len <= 4 + # all other delimited blocks, including fenced code + if line_len < 5 tip = line - tl = line_len + tip_len = line_len else - tip = line[0..3] - tl = 4 + tip = line.slice 0, (tip_len = 4) end - # special case for fenced code blocks - # REVIEW review this logic - fenced_code = false - if Compliance.markdown_syntax - tip_3 = (tl == 4 ? tip.chop : tip) - if tip_3 == '```' - if tl == 4 && tip.end_with?('`') + if Compliance.markdown_syntax && (tip.start_with? '`') + if tip_len == 4 + if tip == '````' + return + elsif (tip = tip.chop) == '```' + line = tip + line_len = tip_len = 3 + else return end - tip = tip_3 - tl = 3 - fenced_code = true - end - end - - # short circuit if not a fenced code block - return if tl == 3 && !fenced_code - end - - if DELIMITED_BLOCKS.has_key? tip - # tip is the full line when delimiter is minimum length - if tl < 4 || tl == line_len - if return_match_data - context, masq = *DELIMITED_BLOCKS[tip] - BlockMatchData.new(context, masq, tip, tip) - else - true - end - elsif %(#{tip}#{tip[-1..-1] * (line_len - tl)}) == line - if return_match_data - context, masq = *DELIMITED_BLOCKS[tip] - BlockMatchData.new(context, masq, tip, line) + elsif tip == '```' + # keep it else - true + return end - # only enable if/when we decide to support non-congruent block delimiters - #elsif (match = BlockDelimiterRx.match(line)) - # if return_match_data - # context, masq = *DELIMITED_BLOCKS[tip] - # BlockMatchData.new(context, masq, tip, match[0]) - # else - # true - # end - else - nil + elsif tip_len == 3 + return end - else - nil + end + # NOTE line matches the tip when delimiter is minimum length or fenced code + context, masq = DELIMITED_BLOCKS[tip] + if context && (line_len == tip_len || (uniform? (line.slice 1, line_len), DELIMITED_BLOCK_TAILS[tip], (line_len - 1))) + return_match_data ? 
(BlockMatchData.new context, masq, tip, line) : true end end @@ -1040,25 +991,26 @@ # if terminator is false, that means the all the lines in the reader should be parsed # NOTE could invoke filter in here, before and after parsing def self.build_block(block_context, content_model, terminator, parent, reader, attributes, options = {}) - if content_model == :skip || content_model == :raw - skip_processing = content_model == :skip - parse_as_content_model = :simple + if content_model == :skip + skip_processing, parse_as_content_model = true, :simple + elsif content_model == :raw + skip_processing, parse_as_content_model = false, :simple else - skip_processing = false - parse_as_content_model = content_model + skip_processing, parse_as_content_model = false, content_model end if terminator.nil? if parse_as_content_model == :verbatim - lines = reader.read_lines_until(:break_on_blank_lines => true, :break_on_list_continuation => true) + lines = reader.read_lines_until break_on_blank_lines: true, break_on_list_continuation: true else content_model = :simple if content_model == :compound - lines = read_paragraph_lines reader, false, :skip_line_comments => true, :skip_processing => true + # TODO we could also skip processing if we're able to detect reader is a BlockReader + lines = read_paragraph_lines reader, false, skip_line_comments: true, skip_processing: skip_processing # QUESTION check for empty lines after grabbing lines for simple content model? end block_reader = nil elsif parse_as_content_model != :compound - lines = reader.read_lines_until(:terminator => terminator, :skip_processing => skip_processing) + lines = reader.read_lines_until terminator: terminator, skip_processing: skip_processing, context: block_context, cursor: :at_mark block_reader = nil # terminator is false when reader has already been prepared elsif terminator == false @@ -1066,56 +1018,45 @@ block_reader = reader else lines = nil - cursor = reader.cursor - block_reader = Reader.new reader.read_lines_until(:terminator => terminator, :skip_processing => skip_processing), cursor - end - - if content_model == :skip - attributes.clear - # FIXME we shouldn't be mixing return types - return lines + block_cursor = reader.cursor + block_reader = Reader.new reader.read_lines_until(terminator: terminator, skip_processing: skip_processing, context: block_context, cursor: :at_mark), block_cursor end if content_model == :verbatim + tab_size = (attributes['tabsize'] || parent.document.attributes['tabsize']).to_i if (indent = attributes['indent']) - adjust_indentation! lines, indent, (attributes['tabsize'] || parent.document.attributes['tabsize']) - elsif (tab_size = (attributes['tabsize'] || parent.document.attributes['tabsize']).to_i) > 0 - adjust_indentation! lines, nil, tab_size - end + adjust_indentation! lines, indent.to_i, tab_size + elsif tab_size > 0 + adjust_indentation! lines, -1, tab_size + end + elsif content_model == :skip + # QUESTION should we still invoke process method if extension is specified? + return end if (extension = options[:extension]) # QUESTION do we want to delete the style? 
attributes.delete('style') - if (block = extension.process_method[parent, block_reader || (Reader.new lines), attributes.dup]) + if (block = extension.process_method[parent, block_reader || (Reader.new lines), attributes.merge]) attributes.replace block.attributes # FIXME if the content model is set to compound, but we only have simple in this context, then # forcefully set the content_model to simple to prevent parsing blocks from children # TODO document this behavior!! - if block.content_model == :compound && !(lines = block.lines).nil_or_empty? + if block.content_model == :compound && !(lines = block.lines).empty? content_model = :compound block_reader = Reader.new lines end else - # FIXME need a test to verify this returns nil at the right time return end else - block = Block.new(parent, block_context, :content_model => content_model, :source => lines, :attributes => attributes) + block = Block.new(parent, block_context, content_model: content_model, source: lines, attributes: attributes) end - # QUESTION should we have an explicit map or can we rely on check for *-caption attribute? - if (attributes.has_key? 'title') && (block.document.attr? %(#{block.context}-caption)) - block.title = attributes.delete 'title' - block.assign_caption attributes.delete('caption') - end + # reader is confined within boundaries of a delimited block, so look for + # blocks until there are no more lines + parse_blocks block_reader, block if content_model == :compound - if content_model == :compound - # we can look for blocks until there are no more lines (and not worry - # about sections) since the reader is confined within the boundaries of a - # delimited block - parse_blocks block_reader, block - end block end @@ -1129,64 +1070,34 @@ # parent - The parent Block to which to attach the parsed blocks # # Returns nothing. - def self.parse_blocks(reader, parent) - while reader.has_more_lines? - block = Parser.next_block(reader, parent) - parent << block if block + def self.parse_blocks(reader, parent, attributes = nil) + if attributes + while ((block = next_block reader, parent, attributes.merge) && parent.blocks << block) || reader.has_more_lines?; end + else + while ((block = next_block reader, parent) && parent.blocks << block) || reader.has_more_lines?; end end + nil end - # Internal: Parse and construct an outline list Block from the current position of the Reader + # Internal: Parse and construct an ordered or unordered list at the current position of the Reader # - # reader - The Reader from which to retrieve the outline list + # reader - The Reader from which to retrieve the list # list_type - A Symbol representing the list type (:olist for ordered, :ulist for unordered) - # parent - The parent Block to which this outline list belongs + # parent - The parent Block to which this list belongs + # style - The block style assigned to this list (optional, default: nil) # - # Returns the Block encapsulating the parsed outline (unordered or ordered) list - def self.next_outline_list(reader, list_type, parent) - list_block = List.new(parent, list_type) - if parent.context == list_type - list_block.level = parent.level + 1 - else - list_block.level = 1 - end - - while reader.has_more_lines? && (match = ListRxMap[list_type].match(reader.peek_line)) - marker = resolve_list_marker(list_type, match[1]) - - # if we are moving to the next item, and the marker is different - # determine if we are moving up or down in nesting - if list_block.items? 
&& marker != list_block.items[0].marker - # assume list is nested by default, but then check to see if we are - # popping out of a nested list by matching an ancestor's list marker - this_item_level = list_block.level + 1 - ancestor = parent - while ancestor.context == list_type - if marker == ancestor.items[0].marker - this_item_level = ancestor.level - break - end - ancestor = ancestor.parent - end - else - this_item_level = list_block.level - end + # Returns the Block encapsulating the parsed unordered or ordered list + def self.parse_list reader, list_type, parent, style + list_block = List.new parent, list_type + list_rx = ListRxMap[list_type] - if !list_block.items? || this_item_level == list_block.level - list_item = next_list_item(reader, list_block, match) - elsif this_item_level < list_block.level - # leave this block - break - elsif this_item_level > list_block.level - # If this next list level is down one from the - # current Block's, append it to content of the current list item - list_block.items[-1] << next_block(reader, list_block) + while reader.has_more_lines? && list_rx =~ reader.peek_line + # NOTE parse_list_item will stop at sibling item or end of list; never sees ancestor items + if (list_item = parse_list_item reader, list_block, $~, $1, style) + list_block.items << list_item end - list_block << list_item if list_item - list_item = nil - - reader.skip_blank_lines + reader.skip_blank_lines || break end list_block @@ -1195,177 +1106,257 @@ # Internal: Catalog any callouts found in the text, but don't process them # # text - The String of text in which to look for callouts - # document - The current document on which the callouts are stored + # document - The current document in which the callouts are stored # # Returns A Boolean indicating whether callouts were found def self.catalog_callouts(text, document) found = false - if text.include? '<' - text.scan(CalloutQuickScanRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - if m[0].chr != '\\' - document.callouts.register(m[2]) - end - # we have to mark as found even if it's escaped so it can be unescaped - found = true - } - end + autonum = 0 + text.scan CalloutScanRx do + document.callouts.register $2 == '.' ? (autonum += 1).to_s : $2 unless $&.start_with? '\\' + # we have to mark as found even if it's escaped so it can be unescaped + found = true + end if text.include? '<' found end - # Internal: Catalog any inline anchors found in the text, but don't process them + # Internal: Catalog a matched inline anchor. + # + # id - The String id of the anchor + # reftext - The optional String reference text of the anchor + # node - The AbstractNode parent node of the anchor node + # location - The source location (file and line) where the anchor was found + # doc - The document to which the node belongs; computed from node if not specified + # + # Returns nothing + def self.catalog_inline_anchor id, reftext, node, location, doc = node.document + reftext = doc.sub_attributes reftext if reftext && (reftext.include? 
ATTR_REF_HEAD) + unless doc.register :refs, [id, (Inline.new node, :anchor, reftext, type: :ref, id: id)] + location = location.cursor if Reader === location + logger.warn message_with_context %(id assigned to anchor already in use: #{id}), source_location: location + end + nil + end + + # Internal: Catalog any inline anchors found in the text (but don't convert) + # + # text - The String text in which to look for inline anchors - # document - The current document on which the references are stored + # block - The block in which the references should be searched + # document - The current Document on which the references are stored + # + # Returns nothing - def self.catalog_inline_anchors(text, document) - if text.include? '[' - text.scan(InlineAnchorRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - next if m[0].start_with? '\\' - id = m[1] || m[3] - reftext = m[2] || m[4] - # enable if we want to allow double quoted values - #id = id.sub(DoubleQuotedRx, '\2') - #if reftext - # reftext = reftext.sub(DoubleQuotedMultiRx, '\2') - #end - document.register(:ids, [id, reftext]) - } + def self.catalog_inline_anchors text, block, document, reader + text.scan InlineAnchorScanRx do + if (id = $1) + if (reftext = $2) + next if (reftext.include? ATTR_REF_HEAD) && (reftext = document.sub_attributes reftext).empty? + end + else + id = $3 + if (reftext = $4) + reftext = reftext.gsub '\]', ']' if reftext.include? ']' + next if (reftext.include? ATTR_REF_HEAD) && (reftext = document.sub_attributes reftext).empty? + end + end + unless document.register :refs, [id, (Inline.new block, :anchor, reftext, type: :ref, id: id)] + location = reader.cursor_at_mark + if (offset = ($`.count LF) + (($&.start_with? LF) ? 1 : 0)) > 0 + (location = location.dup).advance offset + end + logger.warn message_with_context %(id assigned to anchor already in use: #{id}), source_location: location + end + end if (text.include? '[[') || (text.include? 'anchor:') + nil + end + + # Internal: Catalog the bibliography inline anchor found in the start of the list item (but don't convert) + # + # id - The String id of the anchor + # reftext - The optional String reference text of the anchor + # node - The AbstractNode parent node of the anchor node + # reader - The source Reader for the current Document, positioned at the current list item + # + # Returns nothing + def self.catalog_inline_biblio_anchor id, reftext, node, reader + # QUESTION should we sub attributes in reftext (like with regular anchors)?
+ unless node.document.register :refs, [id, (Inline.new node, :anchor, reftext && %([#{reftext}]), type: :bibref, id: id)] + logger.warn message_with_context %(id assigned to bibliography anchor already in use: #{id}), source_location: reader.cursor end nil end # Internal: Parse and construct a description list Block from the current position of the Reader # - # reader - The Reader from which to retrieve the labeled list + # reader - The Reader from which to retrieve the description list # match - The Regexp match for the head of the list - # parent - The parent Block to which this labeled list belongs + # parent - The parent Block to which this description list belongs # - # Returns the Block encapsulating the parsed labeled list - def self.next_labeled_list(reader, match, parent) - list_block = List.new(parent, :dlist) - previous_pair = nil - # allows us to capture until we find a labeled item - # that uses the same delimiter (::, :::, :::: or ;;) + # Returns the Block encapsulating the parsed description list + def self.parse_description_list reader, match, parent + list_block = List.new parent, :dlist + # detects a description list item that uses the same delimiter (::, :::, :::: or ;;) sibling_pattern = DescriptionListSiblingRx[match[2]] + list_block.items << (current_pair = parse_list_item reader, list_block, match, sibling_pattern) - # NOTE skip the match on the first time through as we've already done it (emulates begin...while) - while match || (reader.has_more_lines? && (match = sibling_pattern.match(reader.peek_line))) - term, item = next_list_item(reader, list_block, match, sibling_pattern) - if previous_pair && !previous_pair[-1] - previous_pair.pop - previous_pair[0] << term - previous_pair << item + while reader.has_more_lines? && sibling_pattern =~ reader.peek_line + next_pair = parse_list_item reader, list_block, $~, sibling_pattern + if current_pair[1] + list_block.items << (current_pair = next_pair) else - # FIXME this misses the automatic parent assignment - list_block.items << (previous_pair = [[term], item]) + current_pair[0] << next_pair[0][0] + current_pair[1] = next_pair[1] end + end + + list_block + end + + # Internal: Parse and construct a callout list Block from the current position of the Reader and + # advance the document callouts catalog to the next list. + # + # reader - The Reader from which to retrieve the callout list. + # match - The Regexp match containing the head of the list. + # parent - The parent Block to which this callout list belongs. + # callouts - The document callouts catalog. + # + # Returns the Block that represents the parsed callout list. + def self.parse_callout_list reader, match, parent, callouts + list_block = List.new(parent, :colist) + next_index = 1 + autonum = 0 + # NOTE skip the match on the first time through as we've already done it (emulates begin...while) + while match || ((match = CalloutListRx.match reader.peek_line) && reader.mark) + if (num = match[1]) == '.' + num = (autonum += 1).to_s + end + # might want to move this check to a validate method + unless num == next_index.to_s + logger.warn message_with_context %(callout list item index: expected #{next_index}, got #{num}), source_location: reader.cursor_at_mark + end + if (list_item = parse_list_item reader, list_block, match, '<1>') + list_block.items << list_item + if (coids = callouts.callout_ids list_block.items.size).empty? 
+ logger.warn message_with_context %(no callout found for <#{list_block.items.size}>), source_location: reader.cursor_at_mark + else + list_item.attributes['coids'] = coids + end + end + next_index += 1 match = nil end + callouts.next_list list_block end - # Internal: Parse and construct the next ListItem for the current bulleted - # (unordered or ordered) list Block, callout lists included, or the next - # term ListItem and description ListItem pair for the labeled list Block. - # - # First collect and process all the lines that constitute the next list - # item for the parent list (according to its type). Next, parse those lines - # into blocks and associate them with the ListItem (in the case of a - # labeled list, the description ListItem). Finally, fold the first block - # into the item's text attribute according to rules described in ListItem. + # Internal: Parse and construct the next ListItem (unordered, ordered, or callout list) or next + # term ListItem and description ListItem pair (description list) for the specified list Block. + # + # First, collect and process all the lines that constitute the next list item for the specified + # list (according to its type). Next, create a ListItem (in the case of a description list, a + # description ListItem), parse the lines into blocks, and associate those blocks with that + # ListItem. Finally, fold the first block into the item's text attribute according to rules + # described in ListItem. # # reader - The Reader from which to retrieve the next list item - # list_block - The parent list Block of this ListItem. Also provides access to the list type. - # match - The match Array which contains the marker and text (first-line) of the ListItem - # sibling_trait - The list marker or the Regexp to match a sibling item - # - # Returns the next ListItem or ListItem pair (depending on the list type) - # for the parent list Block. - def self.next_list_item(reader, list_block, match, sibling_trait = nil) + # list_block - The parent list Block for this ListItem. Also provides access to the list type. + # match - The MatchData that contains the list item marker and first line text of the ListItem + # sibling_trait - The trait to match a sibling list item. For ordered and unordered lists, this is + # a String marker (e.g., '**' or 'ii)'). For description lists, this is a Regexp + # marker pattern. + # style - The block style assigned to this list (optional, default: nil) + # + # Returns the next ListItem or [[ListItem], ListItem] pair (description list) for the parent list Block. + def self.parse_list_item(reader, list_block, match, sibling_trait, style = nil) if (list_type = list_block.context) == :dlist - list_term = ListItem.new(list_block, match[1]) - list_item = ListItem.new(list_block, match[3]) - has_text = !match[3].nil_or_empty? 
- else - # Create list item using first line as the text of the list item - text = match[2] - checkbox = false - if list_type == :ulist && text.start_with?('[') - if text.start_with?('[ ] ') - checkbox = true - checked = false - text = text[3..-1].lstrip - elsif text.start_with?('[x] ') || text.start_with?('[*] ') - checkbox = true - checked = true - text = text[3..-1].lstrip - end - end - list_item = ListItem.new(list_block, text) - - if checkbox - # FIXME checklist never makes it into the options attribute - list_block.attributes['checklist-option'] = '' - list_item.attributes['checkbox'] = '' - list_item.attributes['checked'] = '' if checked + dlist = true + list_term = ListItem.new(list_block, (term_text = match[1])) + if term_text.start_with?('[[') && LeadingInlineAnchorRx =~ term_text + catalog_inline_anchor $1, ($2 || $'.lstrip), list_term, reader + end + has_text = true if (item_text = match[3]) + list_item = ListItem.new(list_block, item_text) + if list_block.document.sourcemap + list_term.source_location = reader.cursor + if has_text + list_item.source_location = list_term.source_location + else + sourcemap_assignment_deferred = true + end end - - sibling_trait ||= resolve_list_marker(list_type, match[1], list_block.items.size, true, reader) - list_item.marker = sibling_trait + else has_text = true + list_item = ListItem.new(list_block, (item_text = match[2])) + list_item.source_location = reader.cursor if list_block.document.sourcemap + if list_type == :ulist + list_item.marker = sibling_trait + if item_text.start_with?('[') + if style && style == 'bibliography' + if InlineBiblioAnchorRx =~ item_text + catalog_inline_biblio_anchor $1, $2, list_item, reader + end + elsif item_text.start_with?('[[') + if LeadingInlineAnchorRx =~ item_text + catalog_inline_anchor $1, $2, list_item, reader + end + elsif item_text.start_with?('[ ] ', '[x] ', '[*] ') + list_block.set_option 'checklist' + list_item.attributes['checkbox'] = '' + list_item.attributes['checked'] = '' unless item_text.start_with? '[ ' + list_item.text = item_text.slice(4, item_text.length) + end + end + elsif list_type == :olist + sibling_trait, implicit_style = resolve_ordered_list_marker(sibling_trait, (ordinal = list_block.items.size), true, reader) + list_item.marker = sibling_trait + if ordinal == 0 && !style + # using list level makes more sense, but we don't track it + # basing style on marker level is compliant with AsciiDoc Python + list_block.style = implicit_style || ((ORDERED_LIST_STYLES[sibling_trait.length - 1] || 'arabic').to_s) + end + if item_text.start_with?('[[') && LeadingInlineAnchorRx =~ item_text + catalog_inline_anchor $1, $2, list_item, reader + end + else # :colist + list_item.marker = sibling_trait + if item_text.start_with?('[[') && LeadingInlineAnchorRx =~ item_text + catalog_inline_anchor $1, $2, list_item, reader + end + end end - # first skip the line with the marker / term - reader.advance - cursor = reader.cursor - list_item_reader = Reader.new read_lines_for_list_item(reader, list_type, sibling_trait, has_text), cursor + # first skip the line with the marker / term (it gets put back onto the reader by next_block) + reader.shift + block_cursor = reader.cursor + list_item_reader = Reader.new read_lines_for_list_item(reader, list_type, sibling_trait, has_text), block_cursor if list_item_reader.has_more_lines? 
+ list_item.source_location = block_cursor if sourcemap_assignment_deferred + # NOTE peek on the other side of any comment lines comment_lines = list_item_reader.skip_line_comments - subsequent_line = list_item_reader.peek_line - list_item_reader.unshift_lines comment_lines unless comment_lines.empty? - - if !subsequent_line.nil? - continuation_connects_first_block = subsequent_line.empty? - # if there's no continuation connecting the first block, then - # treat the lines as paragraph text (activated when has_text = false) - if !continuation_connects_first_block && list_type != :dlist - has_text = false + if (subsequent_line = list_item_reader.peek_line) + list_item_reader.unshift_lines comment_lines unless comment_lines.empty? + unless subsequent_line.empty? + content_adjacent = true + # treat lines as paragraph text if continuation does not connect first block (i.e., has_text = nil) + has_text = nil unless dlist end - content_adjacent = !continuation_connects_first_block && !subsequent_line.empty? - else - continuation_connects_first_block = false - content_adjacent = false end - # only relevant for :dlist - options = {:text => !has_text} + # reader is confined to boundaries of list, which means only blocks will be found (no sections) + if (block = next_block(list_item_reader, list_item, {}, text_only: has_text ? nil : true, list_type: list_type)) + list_item.blocks << block + end - # we can look for blocks until there are no more lines (and not worry - # about sections) since the reader is confined within the boundaries of a - # list while list_item_reader.has_more_lines? - if (new_block = next_block(list_item_reader, list_item, {}, options)) - list_item << new_block + if (block = next_block(list_item_reader, list_item, {}, list_type: list_type)) + list_item.blocks << block end end - list_item.fold_first(continuation_connects_first_block, content_adjacent) + list_item.fold_first if content_adjacent && (first_block = list_item.blocks[0]) && first_block.context == :paragraph end - if list_type == :dlist - unless list_item.text? || list_item.blocks? - list_item = nil - end - [list_term, list_item] - else - list_item - end + dlist ? [[list_term], (list_item.text? || list_item.blocks? ? list_item : nil)] : list_item end # Internal: Collect the lines belonging to the current list item, navigating @@ -1379,7 +1370,7 @@ # list_type - The Symbol context of the list (:ulist, :olist, :colist or :dlist) # sibling_trait - A Regexp that matches a sibling of this list item or String list marker # of the items in this list (default: nil) - # has_text - Whether the list item has text defined inline (always true except for labeled lists) + # has_text - Whether the list item has text defined inline (always true except for description lists) # # Returns an Array of lines belonging to the current list item. def self.read_lines_for_list_item(reader, list_type, sibling_trait = nil, has_text = true) @@ -1399,6 +1390,8 @@ # it gets associated with the outermost block detached_continuation = nil + dlist = list_type == :dlist + while reader.has_more_lines? 
this_line = reader.read_line @@ -1435,7 +1428,7 @@ buffer << this_line # grab all the lines in the block, leaving the delimiters in place # we're being more strict here about the terminator, but I think that's a good thing - buffer.concat reader.read_lines_until(:terminator => match.terminator, :read_last_line => true) + buffer.concat reader.read_lines_until(terminator: match.terminator, read_last_line: true, context: nil) continuation = :inactive else break @@ -1443,7 +1436,7 @@ # technically BlockAttributeLineRx only breaks if ensuing line is not a list item # which really means BlockAttributeLineRx only breaks if it's acting as a block delimiter # FIXME to be AsciiDoc compliant, we shouldn't break if style in attribute line is "literal" (i.e., [literal]) - elsif list_type == :dlist && continuation != :active && BlockAttributeLineRx =~ this_line + elsif dlist && continuation != :active && (BlockAttributeLineRx.match? this_line) break else if continuation == :active && !this_line.empty? @@ -1451,24 +1444,23 @@ # two entry points into one) # if we don't process it as a whole, then a line in it that looks like a # list item will throw off the exit from it - if LiteralParagraphRx =~ this_line + if LiteralParagraphRx.match? this_line reader.unshift_line this_line - buffer.concat reader.read_lines_until( - :preserve_last_line => true, - :break_on_blank_lines => true, - :break_on_list_continuation => true) {|line| + if dlist # we may be in an indented list disguised as a literal paragraph # so we need to make sure we don't slurp up a legitimate sibling - list_type == :dlist && is_sibling_list_item?(line, list_type, sibling_trait) - } + buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) {|line| is_sibling_list_item? line, list_type, sibling_trait } + else + buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) + end continuation = :inactive # let block metadata play out until we find the block - elsif BlockTitleRx =~ this_line || BlockAttributeLineRx =~ this_line || AttributeEntryRx =~ this_line + elsif (BlockTitleRx.match? this_line) || (BlockAttributeLineRx.match? this_line) || (AttributeEntryRx.match? this_line) buffer << this_line else - if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx] =~ this_line } + if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx].match? this_line } within_nested_list = true - if nested_list_type == :dlist && $~[3].nil_or_empty? + if nested_list_type == :dlist && $3.nil_or_empty? # get greedy again has_text = false end @@ -1476,13 +1468,13 @@ buffer << this_line continuation = :inactive end - elsif !prev_line.nil? && prev_line.empty? + elsif prev_line && prev_line.empty? # advance to the next line of content if this_line.empty? - reader.skip_blank_lines - this_line = reader.read_line - # if we hit eof or a sibling, stop reading - break if this_line.nil? || is_sibling_list_item?(this_line, list_type, sibling_trait) + # stop reading if we reach eof + break unless (this_line = reader.skip_blank_lines && reader.read_line) + # stop reading if we hit a sibling list item + break if is_sibling_list_item? 
this_line, list_type, sibling_trait end if this_line == LIST_CONTINUATION @@ -1499,22 +1491,21 @@ elsif nested_list_type = NESTABLE_LIST_CONTEXTS.find {|ctx| ListRxMap[ctx] =~ this_line } buffer << this_line within_nested_list = true - if nested_list_type == :dlist && $~[3].nil_or_empty? + if nested_list_type == :dlist && $3.nil_or_empty? # get greedy again has_text = false end # slurp up any literal paragraph offset by blank lines # NOTE we have to check for indented list items first - elsif LiteralParagraphRx =~ this_line + elsif LiteralParagraphRx.match? this_line reader.unshift_line this_line - buffer.concat reader.read_lines_until( - :preserve_last_line => true, - :break_on_blank_lines => true, - :break_on_list_continuation => true) {|line| + if dlist # we may be in an indented list disguised as a literal paragraph # so we need to make sure we don't slurp up a legitimate sibling - list_type == :dlist && is_sibling_list_item?(line, list_type, sibling_trait) - } + buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) {|line| is_sibling_list_item? line, list_type, sibling_trait } + else + buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) + end else break end @@ -1526,10 +1517,10 @@ end end else - has_text = true if !this_line.empty? + has_text = true unless this_line.empty? if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx] =~ this_line } within_nested_list = true - if nested_list_type == :dlist && $~[3].nil_or_empty? + if nested_list_type == :dlist && $3.nil_or_empty? # get greedy again has_text = false end @@ -1542,19 +1533,19 @@ reader.unshift_line this_line if this_line - if detached_continuation - buffer.delete_at detached_continuation - end - - # strip trailing blank lines to prevent empty blocks - buffer.pop while !buffer.empty? && buffer[-1].empty? + buffer[detached_continuation] = '' if detached_continuation - # We do need to replace the optional trailing continuation - # a blank line would have served the same purpose in the document - buffer.pop if !buffer.empty? && buffer[-1] == LIST_CONTINUATION - - #warn "BUFFER[#{list_type},#{sibling_trait}]>#{buffer * EOL}#{buffer.inspect} 1 ? 'section' : 'chapter') + elsif doctype == 'manpage' && (sect_title.casecmp 'synopsis') == 0 + sect_name, sect_special = 'synopsis', true else - # generate an id if one was not *embedded* in the heading line - # or as an anchor above the section - section.id ||= section.generate_id + sect_name = 'section' end - if section.id - # TODO sub reftext - section.document.register(:ids, [section.id, (attributes['reftext'] || section.title)]) + section = Section.new parent, sect_level + section.id, section.title, section.sectname, section.source_location = sect_id, sect_title, sect_name, source_location + if sect_special + section.special = true + if sect_numbered + section.numbered = true + elsif document.attributes['sectnums'] == 'all' + section.numbered = book && sect_level == 1 ? :chapter : true + end + elsif document.attributes['sectnums'] && sect_level > 0 + # NOTE a special section here is guaranteed to be nested in another section + section.numbered = section.special ? 
parent.numbered && true : true + elsif book && sect_level == 0 && document.attributes['partnums'] + section.numbered = true + end + + # generate an ID if one was not embedded or specified as anchor above section title + if (id = section.id || (section.id = (document.attributes.key? 'sectids') ? (generated_id = Section.generate_id section.title, document) : nil)) + # convert title to resolve attributes while in scope + section.title if sect_title.include? ATTR_REF_HEAD unless generated_id + unless document.register :refs, [id, section] + logger.warn message_with_context %(id assigned to section already in use: #{id}), source_location: (reader.cursor_at_line reader.lineno - (sect_atx ? 1 : 2)) + end end + section.update_attributes(attributes) reader.skip_blank_lines section end - # Private: Get the Integer section level based on the characters - # used in the ASCII line under the section title. - # - # line - the String line from under the section title. - def self.section_level(line) - SECTION_LEVELS[line.chr] - end - - #-- - # = is level 0, == is level 1, etc. - def self.single_line_section_level(marker) - marker.length - 1 - end - # Internal: Checks if the next line on the Reader is a section title # # reader - the source Reader # attributes - a Hash of attributes collected above the current line # - # returns the section level if the Reader is positioned at a section title, - # false otherwise + # Returns the Integer section level if the Reader is positioned at a section title or nil otherwise def self.is_next_line_section?(reader, attributes) - if !(val = attributes[1]).nil? && ((ord_0 = val[0].ord) == 100 || ord_0 == 102) && val =~ FloatingTitleStyleRx - return false + if (style = attributes[1]) && (style == 'discrete' || style == 'float') + return + elsif Compliance.underline_style_section_titles + next_lines = reader.peek_lines 2, style && style == 'comment' + is_section_title?(next_lines[0] || '', next_lines[1]) + else + atx_section_title?(reader.peek_line || '') end - return false unless reader.has_more_lines? - Compliance.underline_style_section_titles ? is_section_title?(*reader.peek_lines(2)) : is_section_title?(reader.peek_line) end # Internal: Convenience API for checking if the next line on the Reader is the document title # - # reader - the source Reader - # attributes - a Hash of attributes collected above the current line + # reader - the source Reader + # attributes - a Hash of attributes collected above the current line + # leveloffset - an Integer (or integer String value) the represents the current leveloffset # # returns true if the Reader is positioned at the document title, false otherwise - def self.is_next_line_document_title?(reader, attributes) - is_next_line_section?(reader, attributes) == 0 + def self.is_next_line_doctitle? reader, attributes, leveloffset + if leveloffset + (sect_level = is_next_line_section? reader, attributes) && (sect_level + leveloffset.to_i == 0) + else + (is_next_line_section? reader, attributes) == 0 + end end - # Public: Checks if these lines are a section title + # Public: Checks whether the lines given are an atx or setext section title. # - # line1 - the first line as a String - # line2 - the second line as a String (default: nil) + # line1 - [String] candidate title. + # line2 - [String] candidate underline (default: nil). # - # returns the section level if these lines are a section title, - # false otherwise + # Returns the [Integer] section level if these lines are a section title, otherwise nothing. 
def self.is_section_title?(line1, line2 = nil) - if (level = is_single_line_section_title?(line1)) - level - elsif line2 && (level = is_two_line_section_title?(line1, line2)) - level - else - false - end + atx_section_title?(line1) || (line2.nil_or_empty? ? nil : setext_section_title?(line1, line2)) end - def self.is_single_line_section_title?(line1) - first_char = line1 ? line1.chr : nil - if (first_char == '=' || (Compliance.markdown_syntax && first_char == '#')) && - (match = AtxSectionRx.match(line1)) - single_line_section_level match[1] - else - false + # Checks whether the line given is an atx section title. + # + # The level returned is 1 less than number of leading markers. + # + # line - [String] candidate title with leading atx marker. + # + # Returns the [Integer] section level if this line is an atx section title, otherwise nothing. + def self.atx_section_title? line + if Compliance.markdown_syntax ? ((line.start_with? '=', '#') && ExtAtxSectionTitleRx =~ line) : + ((line.start_with? '=') && AtxSectionTitleRx =~ line) + $1.length - 1 end end - def self.is_two_line_section_title?(line1, line2) - if line1 && line2 && SECTION_LEVELS.has_key?(line2.chr) && - line2 =~ SetextSectionLineRx && line1 =~ SetextSectionTitleRx && - # chomp so that a (non-visible) endline does not impact calculation - (line_length(line1) - line_length(line2)).abs <= 1 - section_level line2 - else - false + # Checks whether the lines given are an setext section title. + # + # line1 - [String] candidate title + # line2 - [String] candidate underline + # + # Returns the [Integer] section level if these lines are an setext section title, otherwise nothing. + def self.setext_section_title? line1, line2 + if (level = SETEXT_SECTION_LEVELS[line2_ch0 = line2.chr]) && (uniform? line2, line2_ch0, (line2_len = line2.length)) && + (SetextSectionTitleRx.match? line1) && (line1.length - line2_len).abs < 2 + level end end # Internal: Parse the section title from the current position of the reader # - # Parse a single or double-line section title. After this method is called, + # Parse an atx (single-line) or setext (underlined) section title. After this method is called, # the Reader will be positioned at the line after the section title. # - # reader - the source reader, positioned at a section title - # document- the current document + # For efficiency, we don't reuse methods internally that check for a section title. + # + # reader - the source [Reader], positioned at a section title. + # document - the current [Document]. # # Examples # # reader.lines # # => ["Foo", "~~~"] # - # id, reftext, title, level, single = parse_section_title(reader, document) + # id, reftext, title, level, atx = parse_section_title(reader, document) # # title # # => "Foo" @@ -1715,13 +1714,13 @@ # # => 2 # id # # => nil - # single + # atx # # => false # # line1 # # => "==== Foo" # - # id, reftext, title, level, single = parse_section_title(reader, document) + # id, reftext, title, level, atx = parse_section_title(reader, document) # # title # # => "Foo" @@ -1729,65 +1728,39 @@ # # => 3 # id # # => nil - # single + # atx # # => true # - # returns an Array of [String, String, Integer, String, Boolean], representing the - # id, reftext, title, level and line count of the Section, or nil. 
- # - #-- - # NOTE for efficiency, we don't reuse methods that check for a section title - def self.parse_section_title(reader, document) - line1 = reader.read_line - sect_id = nil - sect_title = nil - sect_level = -1 + # Returns an 5-element [Array] containing the id (String), reftext (String), + # title (String), level (Integer), and flag (Boolean) indicating whether an + # atx section title was matched, or nothing. + def self.parse_section_title(reader, document, sect_id = nil) sect_reftext = nil - single_line = true + line1 = reader.read_line - first_char = line1.chr - if (first_char == '=' || (Compliance.markdown_syntax && first_char == '#')) && - (match = AtxSectionRx.match(line1)) - sect_level = single_line_section_level match[1] - sect_title = match[2] - if sect_title.end_with?(']]') && (anchor_match = InlineSectionAnchorRx.match(sect_title)) - if anchor_match[2].nil? - sect_title = anchor_match[1] - sect_id = anchor_match[3] - sect_reftext = anchor_match[4] - end - end - elsif Compliance.underline_style_section_titles - if (line2 = reader.peek_line(true)) && SECTION_LEVELS.has_key?(line2.chr) && line2 =~ SetextSectionLineRx && - (name_match = SetextSectionTitleRx.match(line1)) && - # chomp so that a (non-visible) endline does not impact calculation - (line_length(line1) - line_length(line2)).abs <= 1 - sect_title = name_match[1] - if sect_title.end_with?(']]') && (anchor_match = InlineSectionAnchorRx.match(sect_title)) - if anchor_match[2].nil? - sect_title = anchor_match[1] - sect_id = anchor_match[3] - sect_reftext = anchor_match[4] - end - end - sect_level = section_level line2 - single_line = false - reader.advance - end - end - if sect_level >= 0 - sect_level += document.attr('leveloffset', 0).to_i + if Compliance.markdown_syntax ? ((line1.start_with? '=', '#') && ExtAtxSectionTitleRx =~ line1) : + ((line1.start_with? '=') && AtxSectionTitleRx =~ line1) + # NOTE level is 1 less than number of line markers + sect_level, sect_title, atx = $1.length - 1, $2, true + if sect_title.end_with?(']]') && InlineSectionAnchorRx =~ sect_title && !$1 # escaped + sect_title, sect_id, sect_reftext = (sect_title.slice 0, sect_title.length - $&.length), $2, $3 + end unless sect_id + elsif Compliance.underline_style_section_titles && (line2 = reader.peek_line(true)) && + (sect_level = SETEXT_SECTION_LEVELS[line2_ch0 = line2.chr]) && (uniform? line2, line2_ch0, (line2_len = line2.length)) && + (sect_title = SetextSectionTitleRx =~ line1 && $1) && (line1.length - line2_len).abs < 2 + atx = false + if sect_title.end_with?(']]') && InlineSectionAnchorRx =~ sect_title && !$1 # escaped + sect_title, sect_id, sect_reftext = (sect_title.slice 0, sect_title.length - $&.length), $2, $3 + end unless sect_id + reader.shift + else + raise %(Unrecognized section at #{reader.cursor_at_prev_line}) + end + if document.attr? 'leveloffset' + sect_level += (document.attr 'leveloffset').to_i + sect_level = 0 if sect_level < 0 end - [sect_id, sect_reftext, sect_title, sect_level, single_line] - end - - # Public: Calculate the number of unicode characters in the line, excluding the endline - # - # line - the String to calculate - # - # returns the number of unicode characters in the line - def self.line_length(line) - FORCE_UNICODE_LINE_LENGTH ? line.scan(UnicodeCharScanRx).length : line.length + [sect_id, sect_reftext, sect_title, sect_level, atx] end # Public: Consume and parse the two header lines (line 1 = author info, line 2 = revision info). 
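The hunk that follows revises Parser.parse_header_metadata, the entry point that consumes the implicit author line and the optional revision line of the document header. A rough usage sketch, assuming the method remains publicly callable in 2.0 as its doc comment below suggests; the sample header lines are invented for illustration, and the expected values follow the mapping documented in that comment rather than anything verified here:

    require 'asciidoctor'

    # a hypothetical header: an author line followed by a revision line
    header_lines = [
      'Jane Doe <jane@example.org>',
      'v2.1, 2019-08-18: Second edition'
    ]
    reader = Asciidoctor::Reader.new header_lines
    metadata = Asciidoctor::Parser.parse_header_metadata reader
    # per the documented behavior, the returned hash should contain entries such as
    # metadata['firstname'] #=> 'Jane'
    # metadata['email']     #=> 'jane@example.org'
    # metadata['revnumber'] #=> '2.1'
    # metadata['revdate']   #=> '2019-08-18'
    # metadata['revremark'] #=> 'Second edition'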
@@ -1801,38 +1774,35 @@ # Examples # # data = ["Author Name \n", "v1.0, 2012-12-21: Coincide w/ end of world.\n"] - # parse_header_metadata(Reader.new data, nil, :normalize => true) - # # => {'author' => 'Author Name', 'firstname' => 'Author', 'lastname' => 'Name', 'email' => 'author@example.org', - # # 'revnumber' => '1.0', 'revdate' => '2012-12-21', 'revremark' => 'Coincide w/ end of world.'} + # parse_header_metadata(Reader.new data, nil, normalize: true) + # # => { 'author' => 'Author Name', 'firstname' => 'Author', 'lastname' => 'Name', 'email' => 'author@example.org', + # # 'revnumber' => '1.0', 'revdate' => '2012-12-21', 'revremark' => 'Coincide w/ end of world.' } def self.parse_header_metadata(reader, document = nil) - # NOTE this will discard away any comment lines, but not skip blank lines - process_attribute_entries(reader, document) + doc_attrs = document && document.attributes + # NOTE this will discard any comment lines, but not skip blank lines + process_attribute_entries reader, document - metadata = {} - implicit_author = nil - implicit_authors = nil + metadata, implicit_author, implicit_authorinitials = implicit_authors = {}, nil, nil if reader.has_more_lines? && !reader.next_line_empty? - author_metadata = process_authors reader.read_line - - unless author_metadata.empty? + unless (author_metadata = process_authors reader.read_line).empty? if document # apply header subs and assign to document author_metadata.each do |key, val| - unless document.attributes.has_key? key - document.attributes[key] = ::String === val ? (document.apply_header_subs val) : val - end + # NOTE the attributes substitution only applies for the email record + doc_attrs[key] = ::String === val ? (document.apply_header_subs val) : val unless doc_attrs.key? key end - implicit_author = document.attributes['author'] - implicit_authors = document.attributes['authors'] + implicit_author = doc_attrs['author'] + implicit_authorinitials = doc_attrs['authorinitials'] + implicit_authors = doc_attrs['authors'] end metadata = author_metadata end # NOTE this will discard any comment lines, but not skip blank lines - process_attribute_entries(reader, document) + process_attribute_entries reader, document rev_metadata = {} @@ -1843,7 +1813,7 @@ unless (component = match[2].strip).empty? # version must begin with 'v' if date is absent if !match[1] && (component.start_with? 'v') - rev_metadata['revnumber'] = component[1..-1] + rev_metadata['revnumber'] = component.slice 1, component.length else rev_metadata['revdate'] = component end @@ -1859,8 +1829,8 @@ if document # apply header subs and assign to document rev_metadata.each do |key, val| - unless document.attributes.has_key? key - document.attributes[key] = document.apply_header_subs(val) + unless doc_attrs.key? 
key + doc_attrs[key] = document.apply_header_subs val end end end @@ -1869,44 +1839,62 @@ end # NOTE this will discard any comment lines, but not skip blank lines - process_attribute_entries(reader, document) + process_attribute_entries reader, document reader.skip_blank_lines + else + author_metadata = {} end + # process author attribute entries that override (or stand in for) the implicit author line if document - # process author attribute entries that override (or stand in for) the implicit author line - author_metadata = nil - if document.attributes.has_key?('author') && - (author_line = document.attributes['author']) != implicit_author + if doc_attrs.key?('author') && (author_line = doc_attrs['author']) != implicit_author # do not allow multiple, process as names only author_metadata = process_authors author_line, true, false - elsif document.attributes.has_key?('authors') && - (author_line = document.attributes['authors']) != implicit_authors + author_metadata.delete 'authorinitials' if doc_attrs['authorinitials'] != implicit_authorinitials + elsif doc_attrs.key?('authors') && (author_line = doc_attrs['authors']) != implicit_authors # allow multiple, process as names only author_metadata = process_authors author_line, true else - authors = [] - author_key = %(author_#{authors.size + 1}) - while document.attributes.has_key? author_key - authors << document.attributes[author_key] - author_key = %(author_#{authors.size + 1}) - end - if authors.size == 1 - # do not allow multiple, process as names only - author_metadata = process_authors authors[0], true, false - elsif authors.size > 1 - # allow multiple, process as names only - author_metadata = process_authors authors.join('; '), true + authors, author_idx, author_key, explicit, sparse = [], 1, 'author_1', false, false + while doc_attrs.key? author_key + # only use indexed author attribute if value is different + # leaves corner case if line matches with underscores converted to spaces; use double space to force + if (author_override = doc_attrs[author_key]) == author_metadata[author_key] + authors << nil + sparse = true + else + authors << author_override + explicit = true + end + author_key = %(author_#{author_idx += 1}) + end + if explicit + # rebuild implicit author names to reparse + authors.each_with_index do |author, idx| + unless author + authors[idx] = [ + author_metadata[%(firstname_#{name_idx = idx + 1})], + author_metadata[%(middlename_#{name_idx})], + author_metadata[%(lastname_#{name_idx})] + ].compact.map {|it| it.tr ' ', '_' }.join ' ' + end + end if sparse + # process as names only + author_metadata = process_authors authors, true, false + else + author_metadata = {} end end - if author_metadata - document.attributes.update author_metadata + if author_metadata.empty? 
+ metadata['authorcount'] ||= (doc_attrs['authorcount'] = 0) + else + doc_attrs.update author_metadata # special case - if !document.attributes.has_key?('email') && document.attributes.has_key?('email_1') - document.attributes['email'] = document.attributes['email_1'] + if !doc_attrs.key?('email') && doc_attrs.key?('email_1') + doc_attrs['email'] = doc_attrs['email_1'] end end end @@ -1923,69 +1911,66 @@ # semicolon-separated entries in the author line (default: true) # # returns a Hash of author metadata - def self.process_authors(author_line, names_only = false, multiple = true) + def self.process_authors author_line, names_only = false, multiple = true author_metadata = {} - keys = ['author', 'authorinitials', 'firstname', 'middlename', 'lastname', 'email'] - author_entries = multiple ? (author_line.split ';').map {|line| line.strip } : [author_line] - author_entries.each_with_index do |author_entry, idx| + author_idx = 0 + (multiple && (author_line.include? ';') ? (author_line.split AuthorDelimiterRx) : [*author_line]).each do |author_entry| next if author_entry.empty? key_map = {} - if idx == 0 - keys.each do |key| - key_map[key.to_sym] = key - end + if (author_idx += 1) == 1 + AuthorKeys.each {|key| key_map[key.to_sym] = key } else - keys.each do |key| - key_map[key.to_sym] = %(#{key}_#{idx + 1}) - end + AuthorKeys.each {|key| key_map[key.to_sym] = %(#{key}_#{author_idx}) } end - segments = nil - if names_only - # splitting on ' ' collapses repeating spaces uniformly - # `split ' ', 3` causes odd behavior in Opal; see https://github.com/asciidoctor/asciidoctor.js/issues/159 - if (segments = author_entry.split ' ').size > 3 - segments = segments[0..1].push(segments[2..-1].join ' ') + if names_only # when parsing an attribute value + # QUESTION should we rstrip author_entry? + if author_entry.include? '<' + author_metadata[key_map[:author]] = author_entry.tr('_', ' ') + author_entry = author_entry.gsub XmlSanitizeRx, '' + end + # NOTE split names and collapse repeating whitespace (split drops any leading whitespace) + if (segments = author_entry.split nil, 3).size == 3 + segments << (segments.pop.squeeze ' ') end elsif (match = AuthorInfoLineRx.match(author_entry)) - segments = match.to_a - segments.shift + (segments = match.to_a).shift end - unless segments.nil? - author_metadata[key_map[:firstname]] = fname = segments[0].tr('_', ' ') - author_metadata[key_map[:author]] = fname - author_metadata[key_map[:authorinitials]] = fname[0, 1] - if !segments[1].nil? && !segments[2].nil? - author_metadata[key_map[:middlename]] = mname = segments[1].tr('_', ' ') - author_metadata[key_map[:lastname]] = lname = segments[2].tr('_', ' ') - author_metadata[key_map[:author]] = [fname, mname, lname].join ' ' - author_metadata[key_map[:authorinitials]] = [fname[0, 1], mname[0, 1], lname[0, 1]].join - elsif !segments[1].nil? - author_metadata[key_map[:lastname]] = lname = segments[1].tr('_', ' ') - author_metadata[key_map[:author]] = [fname, lname].join ' ' - author_metadata[key_map[:authorinitials]] = [fname[0, 1], lname[0, 1]].join - end - author_metadata[key_map[:email]] = segments[3] unless names_only || segments[3].nil? 
- else - author_metadata[key_map[:author]] = author_metadata[key_map[:firstname]] = fname = author_entry.strip.tr_s(' ', ' ') - author_metadata[key_map[:authorinitials]] = fname[0, 1] - end - - author_metadata['authorcount'] = idx + 1 - # only assign the _1 attributes if there are multiple authors - if idx == 1 - keys.each do |key| - author_metadata[%(#{key}_1)] = author_metadata[key] if author_metadata.has_key? key + if segments + author = author_metadata[key_map[:firstname]] = fname = segments[0].tr('_', ' ') + author_metadata[key_map[:authorinitials]] = fname.chr + if segments[1] + if segments[2] + author_metadata[key_map[:middlename]] = mname = segments[1].tr('_', ' ') + author_metadata[key_map[:lastname]] = lname = segments[2].tr('_', ' ') + author = fname + ' ' + mname + ' ' + lname + author_metadata[key_map[:authorinitials]] = %(#{fname.chr}#{mname.chr}#{lname.chr}) + else + author_metadata[key_map[:lastname]] = lname = segments[1].tr('_', ' ') + author = fname + ' ' + lname + author_metadata[key_map[:authorinitials]] = %(#{fname.chr}#{lname.chr}) + end end + author_metadata[key_map[:author]] ||= author + author_metadata[key_map[:email]] = segments[3] unless names_only || !segments[3] + else + author_metadata[key_map[:author]] = author_metadata[key_map[:firstname]] = fname = author_entry.squeeze(' ').strip + author_metadata[key_map[:authorinitials]] = fname.chr end - if idx == 0 + + if author_idx == 1 author_metadata['authors'] = author_metadata[key_map[:author]] else + # only assign the _1 attributes once we see the second author + if author_idx == 2 + AuthorKeys.each {|key| author_metadata[%(#{key}_1)] = author_metadata[key] if author_metadata.key? key } + end author_metadata['authors'] = %(#{author_metadata['authors']}, #{author_metadata[key_map[:author]]}) end end + author_metadata['authorcount'] = author_idx author_metadata end @@ -1995,18 +1980,18 @@ # blank lines and comments. # # reader - the source reader - # parent - the parent to which the lines belong + # document - the current Document # attributes - a Hash of attributes in which any metadata found will be stored (default: {}) # options - a Hash of options to control processing: (default: {}) - # * :text indicates that lexer is only looking for text content + # * :text_only indicates that parser is only looking for text content # and thus the block title should not be captured # # returns the Hash of attributes including any metadata found - def self.parse_block_metadata_lines(reader, parent, attributes = {}, options = {}) - while parse_block_metadata_line(reader, parent, attributes, options) + def self.parse_block_metadata_lines reader, document, attributes = {}, options = {} + while parse_block_metadata_line reader, document, attributes, options # discard the line just processed - reader.advance - reader.skip_blank_lines + reader.shift + reader.skip_blank_lines || break end attributes end @@ -2024,124 +2009,138 @@ # If the line contains block metadata, the method returns true, otherwise false. 
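For context, a rough sketch of the Hash the reworked process_authors builds for a typical implicit author line (keys follow AuthorKeys; the author line and values are illustrative):

    Asciidoctor::Parser.process_authors 'Doc Writer <doc@example.com>'
    # => { 'author' => 'Doc Writer', 'authors' => 'Doc Writer',
    #      'firstname' => 'Doc', 'lastname' => 'Writer', 'authorinitials' => 'DW',
    #      'email' => 'doc@example.com', 'authorcount' => 1 }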
# # reader - the source reader - # parent - the parent of the current line + # document - the current Document # attributes - a Hash of attributes in which any metadata found will be stored # options - a Hash of options to control processing: (default: {}) - # * :text indicates that lexer is only looking for text content - # and thus the block title should not be captured + # * :text_only indicates the parser is only looking for text content, + # thus neither a block title or attribute entry should be captured # - # returns true if the line contains metadata, otherwise false - def self.parse_block_metadata_line(reader, parent, attributes, options = {}) - return false unless reader.has_more_lines? - next_line = reader.peek_line - if (commentish = next_line.start_with?('//')) && (match = CommentBlockRx.match(next_line)) - terminator = match[0] - reader.read_lines_until(:skip_first_line => true, :preserve_last_line => true, :terminator => terminator, :skip_processing => true) - elsif commentish && CommentLineRx =~ next_line - # do nothing, we'll skip it - elsif !options[:text] && next_line.start_with?(':') && (match = AttributeEntryRx.match(next_line)) - process_attribute_entry(reader, parent, attributes, match) - elsif (in_square_brackets = next_line.start_with?('[') && next_line.end_with?(']')) && (match = BlockAnchorRx.match(next_line)) - unless match[1].nil_or_empty? - attributes['id'] = match[1] - # AsciiDoc always uses [id] as the reftext in HTML output, - # but I'd like to do better in Asciidoctor - # registration is deferred until the block or section is processed - attributes['reftext'] = match[2] unless match[2].nil? - end - elsif in_square_brackets && (match = BlockAttributeListRx.match(next_line)) - parent.document.parse_attributes(match[1], [], :sub_input => true, :into => attributes) - # NOTE title doesn't apply to section, but we need to stash it for the first block - # TODO should issue an error if this is found above the document title - elsif !options[:text] && (match = BlockTitleRx.match(next_line)) - attributes['title'] = match[1] - else - return false + # returns true if the line contains metadata, otherwise falsy + def self.parse_block_metadata_line reader, document, attributes, options = {} + if (next_line = reader.peek_line) && + (options[:text_only] ? (next_line.start_with? '[', '/') : (normal = next_line.start_with? '[', '.', '/', ':')) + if next_line.start_with? '[' + if next_line.start_with? '[[' + if (next_line.end_with? ']]') && BlockAnchorRx =~ next_line + # NOTE registration of id and reftext is deferred until block is processed + attributes['id'] = $1 + if (reftext = $2) + attributes['reftext'] = (reftext.include? ATTR_REF_HEAD) ? (document.sub_attributes reftext) : reftext + end + return true + end + elsif (next_line.end_with? ']') && BlockAttributeListRx =~ next_line + current_style = attributes[1] + # extract id, role, and options from first positional attribute and remove, if present + if (document.parse_attributes $1, [], sub_input: true, sub_result: true, into: attributes)[1] + attributes[1] = (parse_style_attribute attributes, reader) || current_style + end + return true + end + elsif normal && (next_line.start_with? '.') + if BlockTitleRx =~ next_line + # NOTE title doesn't apply to section, but we need to stash it for the first block + # TODO should issue an error if this is found above the document title + attributes['title'] = $1 + return true + end + elsif !normal || (next_line.start_with? 
'/') + if next_line == '//' + return true + elsif normal && (uniform? next_line, '/', (ll = next_line.length)) + unless ll == 3 + reader.read_lines_until terminator: next_line, skip_first_line: true, preserve_last_line: true, skip_processing: true, context: :comment + return true + end + else + return true unless next_line.start_with? '///' + end if next_line.start_with? '//' + # NOTE the final condition can be consolidated into single line + elsif normal && (next_line.start_with? ':') && AttributeEntryRx =~ next_line + process_attribute_entry reader, document, attributes, $~ + return true + end end - - true end - def self.process_attribute_entries(reader, parent, attributes = nil) + # Process consecutive attribute entry lines, ignoring adjacent line comments and comment blocks. + # + # Returns nothing + def self.process_attribute_entries reader, document, attributes = nil reader.skip_comment_lines - while process_attribute_entry(reader, parent, attributes) + while process_attribute_entry reader, document, attributes # discard line just processed - reader.advance + reader.shift reader.skip_comment_lines end end - def self.process_attribute_entry(reader, parent, attributes = nil, match = nil) - match ||= (reader.has_more_lines? ? AttributeEntryRx.match(reader.peek_line) : nil) - if match - name = match[1] - unless (value = match[2] || '').empty? - if value.end_with?(line_continuation = LINE_CONTINUATION) || - value.end_with?(line_continuation = LINE_CONTINUATION_LEGACY) - value = value.chop.rstrip - while reader.advance - break if (next_line = reader.peek_line.strip).empty? - if (keep_open = next_line.end_with? line_continuation) - next_line = next_line.chop.rstrip - end - separator = (value.end_with? LINE_BREAK) ? EOL : ' ' - value = %(#{value}#{separator}#{next_line}) - break unless keep_open - end + def self.process_attribute_entry reader, document, attributes = nil, match = nil + if match || (match = reader.has_more_lines? ? (AttributeEntryRx.match reader.peek_line) : nil) + if (value = match[2]).nil_or_empty? + value = '' + elsif value.end_with? LINE_CONTINUATION, LINE_CONTINUATION_LEGACY + con, value = (value.slice value.length - 2, 2), (value.slice 0, value.length - 2).rstrip + while reader.advance && !(next_line = reader.peek_line || '').empty? + next_line = next_line.lstrip + next_line = (next_line.slice 0, next_line.length - 2).rstrip if (keep_open = next_line.end_with? con) + value = %(#{value}#{(value.end_with? HARD_LINE_BREAK) ? LF : ' '}#{next_line}) + break unless keep_open end end - store_attribute(name, value, (parent ? parent.document : nil), attributes) + store_attribute match[1], value, document, attributes true - else - false end end # Public: Store the attribute in the document and register attribute entry if accessible # - # name - the String name of the attribute to store + # name - the String name of the attribute to store; + # if name begins or ends with !, it signals to remove the attribute with that root name # value - the String value of the attribute to store # doc - the Document being parsed # attrs - the attributes for the current context # - # returns a 2-element array containing the attribute name and value - def self.store_attribute(name, value, doc = nil, attrs = nil) + # returns a 2-element array containing the resolved attribute name (minus the ! 
indicator) and value + def self.store_attribute name, value, doc = nil, attrs = nil # TODO move processing of attribute value to utility method - if name.end_with?('!') - # a nil value signals the attribute should be deleted (undefined) - value = nil + if name.end_with? '!' + # a nil value signals the attribute should be deleted (unset) name = name.chop - elsif name.start_with?('!') - # a nil value signals the attribute should be deleted (undefined) value = nil - name = name[1..-1] + elsif name.start_with? '!' + # a nil value signals the attribute should be deleted (unset) + name = (name.slice 1, name.length) + value = nil + end + + if (name = sanitize_attribute_name name) == 'numbered' + name = 'sectnums' + elsif name == 'hardbreaks' + name = 'hardbreaks-option' end - name = sanitize_attribute_name(name) - accessible = true if doc - # alias numbered attribute to sectnums - if name == 'numbered' - name = 'sectnums' - # support relative leveloffset values - elsif name == 'leveloffset' - if value - case value.chr - when '+' - value = ((doc.attr 'leveloffset', 0).to_i + (value[1..-1] || 0).to_i).to_s - when '-' - value = ((doc.attr 'leveloffset', 0).to_i - (value[1..-1] || 0).to_i).to_s - end + if value + if name == 'leveloffset' + # support relative leveloffset values + if value.start_with? '+' + value = ((doc.attr 'leveloffset', 0).to_i + (value.slice 1, value.length).to_i).to_s + elsif value.start_with? '-' + value = ((doc.attr 'leveloffset', 0).to_i - (value.slice 1, value.length).to_i).to_s + end + end + # QUESTION should we set value to locked value if set_attribute returns false? + if (resolved_value = doc.set_attribute name, value) + value = resolved_value + (Document::AttributeEntry.new name, value).save_to attrs if attrs end + elsif (doc.delete_attribute name) && attrs + (Document::AttributeEntry.new name, value).save_to attrs end - accessible = value ? doc.set_attribute(name, value) : doc.delete_attribute(name) - end - - if accessible && attrs - # NOTE lookup resolved value (resolution occurs inside set_attribute) - value = doc.attributes[name] if value - Document::AttributeEntry.new(name, value).save_to(attrs) + elsif attrs + (Document::AttributeEntry.new name, value).save_to attrs end [name, value] @@ -2164,12 +2163,12 @@ # # Returns the String 0-index marker for this list item def self.resolve_list_marker(list_type, marker, ordinal = 0, validate = false, reader = nil) - if list_type == :olist && !marker.start_with?('.') - resolve_ordered_list_marker(marker, ordinal, validate, reader) - elsif list_type == :colist - '<1>' - else + if list_type == :ulist marker + elsif list_type == :olist + resolve_ordered_list_marker(marker, ordinal, validate, reader)[0] + else # :colist + '<1>' end end @@ -2190,53 +2189,56 @@ # Examples # # marker = 'B.' - # Parser.resolve_ordered_list_marker(marker, 1, true) - # # => 'A.' + # Parser.resolve_ordered_list_marker(marker, 1, true, reader) + # # => ['A.', :upperalpha] + # + # marker = '.' 
+ # Parser.resolve_ordered_list_marker(marker, 1, true, reader) + # # => ['.'] # - # Returns the String of the first marker in this number series + # Returns a tuple that contains the String of the first marker in this number + # series and the implicit list style, if applicable def self.resolve_ordered_list_marker(marker, ordinal = 0, validate = false, reader = nil) - number_style = ORDERED_LIST_STYLES.find {|s| OrderedListMarkerRxMap[s] =~ marker } - expected = actual = nil - case number_style - when :arabic - if validate - expected = ordinal + 1 - actual = marker.to_i - end - marker = '1.' - when :loweralpha - if validate - expected = ('a'[0].ord + ordinal).chr - actual = marker.chomp('.') - end - marker = 'a.' - when :upperalpha - if validate - expected = ('A'[0].ord + ordinal).chr - actual = marker.chomp('.') - end - marker = 'A.' - when :lowerroman - if validate - # TODO report this in roman numerals; see https://github.com/jamesshipton/roman-numeral/blob/master/lib/roman_numeral.rb - expected = ordinal + 1 - actual = roman_numeral_to_int(marker.chomp(')')) - end - marker = 'i)' - when :upperroman - if validate - # TODO report this in roman numerals; see https://github.com/jamesshipton/roman-numeral/blob/master/lib/roman_numeral.rb - expected = ordinal + 1 - actual = roman_numeral_to_int(marker.chomp(')')) - end - marker = 'I)' + return [marker] if marker.start_with? '.' + # NOTE case statement is guaranteed to match one of the conditions + case (style = ORDERED_LIST_STYLES.find {|s| OrderedListMarkerRxMap[s].match? marker }) + when :arabic + if validate + expected = ordinal + 1 + actual = marker.to_i # remove trailing . and coerce to int + end + marker = '1.' + when :loweralpha + if validate + expected = ('a'[0].ord + ordinal).chr + actual = marker.chop # remove trailing . + end + marker = 'a.' + when :upperalpha + if validate + expected = ('A'[0].ord + ordinal).chr + actual = marker.chop # remove trailing . + end + marker = 'A.' + when :lowerroman + if validate + expected = Helpers.int_to_roman(ordinal + 1).downcase + actual = marker.chop # remove trailing ) + end + marker = 'i)' + when :upperroman + if validate + expected = Helpers.int_to_roman(ordinal + 1) + actual = marker.chop # remove trailing ) + end + marker = 'I)' end if validate && expected != actual - warn %(asciidoctor: WARNING: #{reader.line_info}: list item index: expected #{expected}, got #{actual}) + logger.warn message_with_context %(list item index: expected #{expected}, got #{actual}), source_location: reader.cursor end - marker + [marker, style] end # Internal: Determine whether the this line is a sibling list item @@ -2246,25 +2248,12 @@ # list_type - The context of the list (:olist, :ulist, :colist, :dlist) # sibling_trait - The String marker for the list or the Regexp to match a sibling # - # Returns a Boolean indicating whether this line is a sibling list item given - # the criteria provided - def self.is_sibling_list_item?(line, list_type, sibling_trait) + # Returns a Boolean indicating whether this line is a sibling list item given the criteria provided + def self.is_sibling_list_item? line, list_type, sibling_trait if ::Regexp === sibling_trait - matcher = sibling_trait - expected_marker = false + sibling_trait.match? 
line else - matcher = ListRxMap[list_type] - expected_marker = sibling_trait - end - - if (m = matcher.match(line)) - if expected_marker - expected_marker == resolve_list_marker(list_type, m[1]) - else - true - end - else - false + ListRxMap[list_type] =~ line && sibling_trait == (resolve_list_marker list_type, $1) end end @@ -2275,112 +2264,119 @@ # attributes - attributes captured from above this Block # # returns an instance of Asciidoctor::Table parsed from the provided reader - def self.next_table(table_reader, parent, attributes) + def self.parse_table(table_reader, parent, attributes) table = Table.new(parent, attributes) - if (attributes.has_key? 'title') - table.title = attributes.delete 'title' - table.assign_caption attributes.delete('caption') - end if (attributes.key? 'cols') && !(colspecs = parse_colspecs attributes['cols']).empty? table.create_columns colspecs explicit_colspecs = true - else - explicit_colspecs = false end - skipped = table_reader.skip_blank_lines - - parser_ctx = Table::ParserContext.new(table_reader, table, attributes) - skip_implicit_header = (attributes.key? 'header-option') || (attributes.key? 'noheader-option') - loop_idx = -1 - while table_reader.has_more_lines? - loop_idx += 1 - line = table_reader.read_line - - if !skip_implicit_header && skipped == 0 && loop_idx == 0 && - !(next_line = table_reader.peek_line).nil? && next_line.empty? - table.has_header_option = true - attributes['header-option'] = '' - attributes['options'] = (attributes.key? 'options') ? %(#{attributes['options']},header) : 'header' - end - - if parser_ctx.format == 'psv' + skipped = table_reader.skip_blank_lines || 0 + parser_ctx = Table::ParserContext.new table_reader, table, attributes + format, loop_idx, implicit_header_boundary = parser_ctx.format, -1, nil + implicit_header = true unless skipped > 0 || attributes['header-option'] || attributes['noheader-option'] + + while (line = table_reader.read_line) + if (beyond_first = (loop_idx += 1) > 0) && line.empty? + line = nil + implicit_header_boundary += 1 if implicit_header_boundary + elsif format == 'psv' if parser_ctx.starts_with_delimiter? line - line = line[1..-1] - # push an empty cell spec if boundary at start of line + line = line.slice 1, line.length + # push empty cell spec if cell boundary appears at start of line parser_ctx.close_open_cell + implicit_header_boundary = nil if implicit_header_boundary else - next_cellspec, line = parse_cellspec(line, :start, parser_ctx.delimiter) - # if the cell spec is not null, then we're at a cell boundary - if !next_cellspec.nil? + next_cellspec, line = parse_cellspec line, :start, parser_ctx.delimiter + # if cellspec is not nil, we're at a cell boundary + if next_cellspec parser_ctx.close_open_cell next_cellspec + implicit_header_boundary = nil if implicit_header_boundary + # otherwise, the cell continues from previous line + elsif implicit_header_boundary && implicit_header_boundary == loop_idx + implicit_header, implicit_header_boundary = false, nil + end + end + end + + unless beyond_first + table_reader.mark + # NOTE implicit header is offset by at least one blank line; implicit_header_boundary tracks size of gap + if implicit_header + if table_reader.has_more_lines? && table_reader.peek_line.empty? + implicit_header_boundary = 1 else - # QUESTION do we not advance to next line? if so, when will we if we came into this block? + implicit_header = false end end end - seen = false - while !seen || !line.empty? 
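Circling back to store_attribute a few hunks above, a short sketch of the naming conventions it handles, assuming doc is the Document being parsed and attrs is the Hash collecting pending attribute entries (names and values illustrative):

    Asciidoctor::Parser.store_attribute 'numbered', '', doc, attrs      # stored under the 'sectnums' alias
    Asciidoctor::Parser.store_attribute 'sectnums!', '', doc, attrs     # trailing ! unsets the attribute
    Asciidoctor::Parser.store_attribute 'leveloffset', '+2', doc, attrs # relative value added to current leveloffset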
- seen = true - if (m = parser_ctx.match_delimiter(line)) - if parser_ctx.format == 'csv' - if parser_ctx.buffer_has_unclosed_quotes?(m.pre_match) - # throw it back, it's too small - line = parser_ctx.skip_matched_delimiter(m) - next + # this loop is used for flow control; internal logic controls how many times it executes + while true + if line && (m = parser_ctx.match_delimiter line) + pre_match, post_match = m.pre_match, m.post_match + case format + when 'csv' + if parser_ctx.buffer_has_unclosed_quotes? pre_match + parser_ctx.skip_past_delimiter pre_match + break if (line = post_match).empty? + redo + end + parser_ctx.buffer = %(#{parser_ctx.buffer}#{pre_match}) + when 'dsv' + if pre_match.end_with? '\\' + parser_ctx.skip_past_escaped_delimiter pre_match + if (line = post_match).empty? + parser_ctx.buffer = %(#{parser_ctx.buffer}#{LF}) + parser_ctx.keep_cell_open + break + end + redo end - else - if m.pre_match.end_with? '\\' - # skip over escaped delimiter - # handle special case when end of line is reached (see issue #1306) - if (line = parser_ctx.skip_matched_delimiter(m, true)).empty? - parser_ctx.buffer = %(#{parser_ctx.buffer}#{EOL}) + parser_ctx.buffer = %(#{parser_ctx.buffer}#{pre_match}) + else # psv + if pre_match.end_with? '\\' + parser_ctx.skip_past_escaped_delimiter pre_match + if (line = post_match).empty? + parser_ctx.buffer = %(#{parser_ctx.buffer}#{LF}) parser_ctx.keep_cell_open break end - next + redo end - end - - if parser_ctx.format == 'psv' - next_cellspec, cell_text = parse_cellspec(m.pre_match, :end) + next_cellspec, cell_text = parse_cellspec pre_match parser_ctx.push_cellspec next_cellspec parser_ctx.buffer = %(#{parser_ctx.buffer}#{cell_text}) - else - parser_ctx.buffer = %(#{parser_ctx.buffer}#{m.pre_match}) - end - - if (line = m.post_match).empty? - # hack to prevent dropping empty cell found at end of line (see issue #1106) - seen = false end - + # don't break if empty to preserve empty cell found at end of line (see issue #1106) + line = nil if (line = post_match).empty? parser_ctx.close_cell else - # no other delimiters to see here - # suck up this line into the buffer and move on - parser_ctx.buffer = %(#{parser_ctx.buffer}#{line}#{EOL}) - # QUESTION make stripping endlines in csv data an option? (unwrap-option?) - if parser_ctx.format == 'csv' - parser_ctx.buffer = %(#{parser_ctx.buffer.rstrip} ) - end - line = '' - if parser_ctx.format == 'psv' || (parser_ctx.format == 'csv' && - parser_ctx.buffer_has_unclosed_quotes?) - parser_ctx.keep_cell_open - else + # no other delimiters to see here; suck up this line into the buffer and move on + parser_ctx.buffer = %(#{parser_ctx.buffer}#{line}#{LF}) + case format + when 'csv' + if parser_ctx.buffer_has_unclosed_quotes? + implicit_header, implicit_header_boundary = false, nil if implicit_header_boundary && loop_idx == 0 + parser_ctx.keep_cell_open + else + parser_ctx.close_cell true + end + when 'dsv' parser_ctx.close_cell true + else # psv + parser_ctx.keep_cell_open end + break end end - skipped = table_reader.skip_blank_lines unless parser_ctx.cell_open? - - unless table_reader.has_more_lines? - # NOTE may have already closed cell in csv or dsv table (see previous call to parser_ctx.close_cell(true)) - parser_ctx.close_cell true if parser_ctx.cell_open? + # NOTE cell may already be closed if table format is csv or dsv + if parser_ctx.cell_open? + parser_ctx.close_cell true unless table_reader.has_more_lines? 
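The implicit_header bookkeeping above boils down to: the first row is promoted to the header only when no blank line separates it from the opening delimiter, the row is followed by at least one blank line, and neither the header nor noheader option is set explicitly. A sketch under those assumptions (table body shown as a Ruby heredoc; content illustrative):

    table_body = <<~'EOS'
      |Name    |Value

      |gem     |asciidoctor
      |version |2.0.10
    EOS
    # skipped == 0 and the first row is followed by a blank line, so implicit_header
    # stays true and parse_table sets header-option on the table attributes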
+ else + table_reader.skip_blank_lines || break end end @@ -2388,6 +2384,11 @@ table.assign_column_widths end + if implicit_header + table.has_header_option = true + attributes['header-option'] = '' + end + table.partition_header_footer attributes table @@ -2405,7 +2406,7 @@ # returns a Hash of attributes that specify how to format # and layout the cells in the table. def self.parse_colspecs records - records = records.tr ' ', '' if records.include? ' ' + records = records.delete ' ' if records.include? ' ' # check for deprecated syntax: single number, equal column spread if records == records.to_i.to_s return ::Array.new(records.to_i) { { 'width' => 1 } } @@ -2413,7 +2414,7 @@ specs = [] # NOTE -1 argument ensures we don't drop empty records - records.split(',', -1).each {|record| + ((records.include? ',') ? (records.split ',', -1) : (records.split ';', -1)).each do |record| if record.empty? specs << { 'width' => 1 } # TODO might want to use scan rather than this mega-regexp @@ -2422,32 +2423,33 @@ if m[2] # make this an operation colspec, rowspec = m[2].split '.' - if !colspec.nil_or_empty? && Table::ALIGNMENTS[:h].has_key?(colspec) - spec['halign'] = Table::ALIGNMENTS[:h][colspec] + if !colspec.nil_or_empty? && TableCellHorzAlignments.key?(colspec) + spec['halign'] = TableCellHorzAlignments[colspec] end - if !rowspec.nil_or_empty? && Table::ALIGNMENTS[:v].has_key?(rowspec) - spec['valign'] = Table::ALIGNMENTS[:v][rowspec] + if !rowspec.nil_or_empty? && TableCellVertAlignments.key?(rowspec) + spec['valign'] = TableCellVertAlignments[rowspec] end end - # to_i permits us to support percentage width by stripping the % - # NOTE this is slightly out of compliance w/ AsciiDoc, but makes way more sense - spec['width'] = (m[3] ? m[3].to_i : 1) + if (width = m[3]) + # to_i will strip the optional % + spec['width'] = width == '~' ? -1 : width.to_i + else + spec['width'] = 1 + end # make this an operation - if m[4] && Table::TEXT_STYLES.has_key?(m[4]) - spec['style'] = Table::TEXT_STYLES[m[4]] + if m[4] && TableCellStyles.key?(m[4]) + spec['style'] = TableCellStyles[m[4]] end if m[1] - 1.upto(m[1].to_i) { - specs << spec.dup - } + 1.upto(m[1].to_i) { specs << spec.merge } else specs << spec end end - } + end specs end @@ -2462,14 +2464,12 @@ # # returns the Hash of attributes that indicate how to layout # and style this cell in the table. - def self.parse_cellspec(line, pos = :start, delimiter = nil) - m = nil - rest = '' + def self.parse_cellspec(line, pos = :end, delimiter = nil) + m, rest = nil, '' - case pos - when :start + if pos == :start if line.include? delimiter - spec_part, rest = line.split delimiter, 2 + spec_part, delimiter, rest = line.partition delimiter if (m = CellSpecStartRx.match spec_part) return [{}, rest] if m[0].empty? else @@ -2478,7 +2478,7 @@ else return [nil, line] end - when :end + else # pos == :end if (m = CellSpecEndRx.match line) # NOTE return the line stripped of trailing whitespace if no cellspec is found in this case return [{}, line.rstrip] if m[0].lstrip.empty? @@ -2503,16 +2503,16 @@ if m[3] colspec, rowspec = m[3].split '.' - if !colspec.nil_or_empty? && Table::ALIGNMENTS[:h].has_key?(colspec) - spec['halign'] = Table::ALIGNMENTS[:h][colspec] + if !colspec.nil_or_empty? && TableCellHorzAlignments.key?(colspec) + spec['halign'] = TableCellHorzAlignments[colspec] end - if !rowspec.nil_or_empty? && Table::ALIGNMENTS[:v].has_key?(rowspec) - spec['valign'] = Table::ALIGNMENTS[:v][rowspec] + if !rowspec.nil_or_empty? 
&& TableCellVertAlignments.key?(rowspec) + spec['valign'] = TableCellVertAlignments[rowspec] end end - if m[4] && Table::TEXT_STYLES.has_key?(m[4]) - spec['style'] = Table::TEXT_STYLES[m[4]] + if m[4] && TableCellStyles.key?(m[4]) + spec['style'] = TableCellStyles[m[4]] end [spec, rest] @@ -2522,215 +2522,213 @@ # # Parse the first positional attribute to extract the style, role and id # parts, assign the values to their cooresponding attribute keys and return - # both the original style attribute and the parsed value from the first - # positional attribute. + # the parsed style from the first positional attribute. # # attributes - The Hash of attributes to process and update # # Examples # # puts attributes - # => {1 => "abstract#intro.lead%fragment", "style" => "preamble"} + # => { 1 => "abstract#intro.lead%fragment", "style" => "preamble" } # # parse_style_attribute(attributes) - # => ["abstract", "preamble"] + # => "abstract" # # puts attributes - # => {1 => "abstract#intro.lead", "style" => "abstract", "id" => "intro", - # "role" => "lead", "options" => ["fragment"], "fragment-option" => ''} + # => { 1 => "abstract#intro.lead%fragment", "style" => "abstract", "id" => "intro", + # "role" => "lead", "options" => "fragment", "fragment-option" => '' } # - # Returns a two-element Array of the parsed style from the - # first positional attribute and the original style that was - # replaced - def self.parse_style_attribute(attributes, reader = nil) - original_style = attributes['style'] - raw_style = attributes[1] - # NOTE spaces are not allowed in shorthand, so if we find one, this ain't shorthand - if raw_style && !raw_style.include?(' ') && Compliance.shorthand_property_syntax - type = :style - collector = [] - parsed = {} - # QUESTION should this be a private method? (though, it's never called if shorthand isn't used) - save_current = lambda { - if collector.empty? - if type != :style - warn %(asciidoctor: WARNING:#{reader.nil? ? nil : " #{reader.prev_line_info}:"} invalid empty #{type} detected in style attribute) - end - else - case type - when :role, :option - parsed[type] ||= [] - parsed[type].push collector.join - when :id - if parsed.has_key? :id - warn %(asciidoctor: WARNING:#{reader.nil? ? nil : " #{reader.prev_line_info}:"} multiple ids detected in style attribute) - end - parsed[type] = collector.join - else - parsed[type] = collector.join - end - collector = [] - end - } + # Returns the String style parsed from the first positional attribute + def self.parse_style_attribute attributes, reader = nil + # NOTE spaces are not allowed in shorthand, so if we detect one, this ain't no shorthand + if (raw_style = attributes[1]) && !raw_style.include?(' ') && Compliance.shorthand_property_syntax + name = nil + accum = '' + parsed_attrs = {} raw_style.each_char do |c| - if c == '.' || c == '#' || c == '%' - save_current.call - case c - when '.' - type = :role - when '#' - type = :id - when '%' - type = :option - end + case c + when '.' 
+ yield_buffered_attribute parsed_attrs, name, accum, reader + accum = '' + name = :role + when '#' + yield_buffered_attribute parsed_attrs, name, accum, reader + accum = '' + name = :id + when '%' + yield_buffered_attribute parsed_attrs, name, accum, reader + accum = '' + name = :option else - collector.push c + accum = accum + c end end # small optimization if no shorthand is found - if type == :style - parsed_style = attributes['style'] = raw_style - else - save_current.call + if name + yield_buffered_attribute parsed_attrs, name, accum, reader - if parsed.has_key? :style - parsed_style = attributes['style'] = parsed[:style] - else - parsed_style = nil + if (parsed_style = parsed_attrs[:style]) + attributes['style'] = parsed_style end - if parsed.has_key? :id - attributes['id'] = parsed[:id] + attributes['id'] = parsed_attrs[:id] if parsed_attrs.key? :id + + if parsed_attrs.key? :role + attributes['role'] = (existing_role = attributes['role']).nil_or_empty? ? (parsed_attrs[:role].join ' ') : %(#{existing_role} #{parsed_attrs[:role].join ' '}) end - if parsed.has_key? :role - attributes['role'] = parsed[:role] * ' ' + if parsed_attrs.key? :option + (opts = parsed_attrs[:option]).each {|opt| attributes[%(#{opt}-option)] = '' } end - if parsed.has_key? :option - (options = parsed[:option]).each do |option| - attributes[%(#{option}-option)] = '' - end - if (existing_opts = attributes['options']) - attributes['options'] = (options + existing_opts.split(',')) * ',' + parsed_style + else + attributes['style'] = raw_style + end + else + attributes['style'] = raw_style + end + end + + # Internal: Save the collected attribute (:id, :option, :role, or nil for :style) in the attribute Hash. + def self.yield_buffered_attribute attrs, name, value, reader + if name + if value.empty? + if reader + logger.warn message_with_context %(invalid empty #{name} detected in style attribute), source_location: reader.cursor_at_prev_line + else + logger.warn %(invalid empty #{name} detected in style attribute) + end + elsif name == :id + if attrs.key? :id + if reader + logger.warn message_with_context 'multiple ids detected in style attribute', source_location: reader.cursor_at_prev_line else - attributes['options'] = options * ',' + logger.warn 'multiple ids detected in style attribute' end end + attrs[name] = value + else + (attrs[name] ||= []) << value end - - [parsed_style, original_style] else - attributes['style'] = raw_style - [raw_style, original_style] + attrs[:style] = value unless value.empty? end + nil end - # Remove the block indentation (the leading whitespace equal to the amount of - # leading whitespace of the least indented line), then replace tabs with - # spaces (using proper tab expansion logic) and, finally, indent the lines by - # the amount specified. + # Remove the block indentation (the amount of whitespace of the least indented line), replace tabs with spaces (using + # proper tab expansion logic) and, finally, indent the lines by the margin width. Modifies the input Array directly. # - # This method preserves the relative indentation of the lines. + # This method preserves the significant indentation (that exceeding the block indent) on each line. 
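A sketch of how the rewritten shorthand handling above accumulates roles and options (attribute values illustrative): roles now append to any existing role attribute, and each option becomes a <name>-option entry.

    attrs = { 1 => 'sidebar#notes.role1.role2%unbreakable', 'role' => 'lead' }
    Asciidoctor::Parser.parse_style_attribute attrs
    # => 'sidebar'
    # attrs now contains 'style' => 'sidebar', 'id' => 'notes',
    #   'role' => 'lead role1 role2', and 'unbreakable-option' => ''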
# - # lines - the Array of String lines to process (no trailing endlines) - # indent - the integer number of spaces to add to the beginning - # of each line; if this value is nil, the existing - # space is preserved (optional, default: 0) + # lines - The Array of String lines to process (no trailing newlines) + # indent_size - The Integer number of spaces to readd to the start of non-empty lines after removing the indentation. + # If this value is < 0, the existing indentation is preserved (optional, default: 0) + # tab_size - the Integer number of spaces to use in place of a tab. A value of <= 0 disables the replacement + # (optional, default: 0) # # Examples # # source = < [" def names", " @names.split ' '", " end"] + # source.split ?\n + # # => [" def names", " @names.split", " end"] # - # puts Parser.adjust_indentation!(source.split "\n") * "\n" + # puts (Parser.adjust_indentation! source.split ?\n).join ?\n # # => def names - # # => @names.split ' ' + # # => @names.split # # => end # # returns Nothing - #-- - # QUESTION should indent be called margin? - def self.adjust_indentation! lines, indent = 0, tab_size = 0 + def self.adjust_indentation! lines, indent_size = 0, tab_size = 0 return if lines.empty? - # expand tabs if a tab is detected unless tab_size is nil - if (tab_size = tab_size.to_i) > 0 && (lines.join.include? TAB) - #if (tab_size = tab_size.to_i) > 0 && (lines.index {|line| line.include? TAB }) + # expand tabs if a tab character is detected and tab_size > 0 + if tab_size > 0 && lines.any? {|line| line.include? TAB } full_tab_space = ' ' * tab_size lines.map! do |line| - next line if line.empty? - - # NOTE Opal has to patch this use of sub! - line.sub!(TabIndentRx) {|tabs| full_tab_space * tabs.length } if line.start_with? TAB - - if line.include? TAB + if line.empty? + line + elsif (tab_idx = line.index TAB) + if tab_idx == 0 + leading_tabs = 0 + line.each_byte do |b| + break unless b == 9 + leading_tabs += 1 + end + line = %(#{full_tab_space * leading_tabs}#{line.slice leading_tabs, line.length}) + next line unless line.include? TAB + end # keeps track of how many spaces were added to adjust offset in match data spaces_added = 0 - # NOTE Opal has to patch this use of gsub! - line.gsub!(TabRx) { - # calculate how many spaces this tab represents, then replace tab with spaces - if (offset = ($~.begin 0) + spaces_added) % tab_size == 0 - spaces_added += (tab_size - 1) - full_tab_space - else - unless (spaces = tab_size - offset % tab_size) == 1 - spaces_added += (spaces - 1) + idx = 0 + result = '' + line.each_char do |c| + if c == TAB + # calculate how many spaces this tab represents, then replace tab with spaces + if (offset = idx + spaces_added) % tab_size == 0 + spaces_added += (tab_size - 1) + result = result + full_tab_space + else + unless (spaces = tab_size - offset % tab_size) == 1 + spaces_added += (spaces - 1) + end + result = result + (' ' * spaces) end - ' ' * spaces + else + result = result + c end - } + idx += 1 + end + result else line end end end - # skip adjustment of gutter if indent is -1 - return unless indent && (indent = indent.to_i) > -1 + # skip block indent adjustment if indent_size is < 0 + return if indent_size < 0 - # determine width of gutter - gutter_width = nil + # determine block indent (assumes no whitespace-only lines are present) + block_indent = nil lines.each do |line| next if line.empty? 
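To complement the Examples block above, a sketch of the tab-expansion path combined with block-indent removal (a tab_size of 4 is assumed; input is illustrative):

    lines = ["\tdef names", "\t\t@names.split", "\tend"]
    Asciidoctor::Parser.adjust_indentation! lines, 0, 4
    lines
    # => ["def names", "    @names.split", "end"]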
- # NOTE this logic assumes no whitespace-only lines if (line_indent = line.length - line.lstrip.length) == 0 - gutter_width = nil + block_indent = nil break - else - unless gutter_width && line_indent > gutter_width - gutter_width = line_indent - end end + block_indent = line_indent unless block_indent && block_indent < line_indent end - # remove gutter then apply new indent if specified - # NOTE gutter_width is > 0 if not nil - if indent == 0 - if gutter_width - lines.map! {|line| line.empty? ? line : line[gutter_width..-1] } - end + # remove block indent then apply indent_size if specified + # NOTE block_indent is > 0 if not nil + if indent_size == 0 + lines.map! {|line| line.empty? ? line : (line.slice block_indent, line.length) } if block_indent else - padding = ' ' * indent - if gutter_width - lines.map! {|line| line.empty? ? line : padding + line[gutter_width..-1] } + new_block_indent = ' ' * indent_size + if block_indent + lines.map! {|line| line.empty? ? line : new_block_indent + (line.slice block_indent, line.length) } else - lines.map! {|line| line.empty? ? line : padding + line } + lines.map! {|line| line.empty? ? line : new_block_indent + line } end end nil end - # Public: Convert a string to a legal attribute name. + def self.uniform? str, chr, len + (str.count chr) == len + end + + # Internal: Convert a string to a legal attribute name. # # name - the String name of the attribute # @@ -2749,27 +2747,5 @@ def self.sanitize_attribute_name(name) name.gsub(InvalidAttributeNameCharsRx, '').downcase end - - # Internal: Converts a Roman numeral to an integer value. - # - # value - The String Roman numeral to convert - # - # Returns the Integer for this Roman numeral - def self.roman_numeral_to_int(value) - value = value.downcase - digits = { 'i' => 1, 'v' => 5, 'x' => 10 } - result = 0 - - (0..value.length - 1).each {|i| - digit = digits[value[i..i]] - if i + 1 < value.length && digits[value[i+1..i+1]] > digit - result -= digit - else - result += digit - end - } - - result - end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/path_resolver.rb asciidoctor-2.0.10/lib/asciidoctor/path_resolver.rb --- asciidoctor-1.5.5/lib/asciidoctor/path_resolver.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/path_resolver.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Handles all operations for resolving, cleaning and joining paths. # This class includes operations for handling both web paths (request URIs) and @@ -7,7 +7,7 @@ # The main emphasis of the class is on creating clean and secure paths. Clean # paths are void of duplicate parent and current directory references in the # path name. Secure paths are paths which are restricted from accessing -# directories outside of a jail root, if specified. +# directories outside of a jail path, if specified. # # Since joining two paths can result in an insecure path, this class also # handles the task of joining a parent (start) and child (target) path. 
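Before the diff moves on to path_resolver.rb, the two small helpers introduced above behave as follows (values illustrative):

    Asciidoctor::Parser.uniform? '////', '/', 4           # => true
    Asciidoctor::Parser.sanitize_attribute_name 'Foo Bar'  # => 'foobar'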
@@ -84,7 +84,7 @@ # => 'C:/data/docs/css' # # begin -# resolver.system_path('../../../css', '../../..', '/path/to/docs', :recover => false) +# resolver.system_path('../../../css', '../../..', '/path/to/docs', recover: false) # rescue SecurityError => e # puts e.message # end @@ -94,20 +94,22 @@ # => '/path/to/docs/images' # # begin -# resolver.system_path('images', '/etc', '/path/to/docs') +# resolver.system_path('images', '/etc', '/path/to/docs', recover: false) # rescue SecurityError => e # puts e.message # end -# => Start path /etc is outside of jail: /path/to/docs' +# => start path /etc is outside of jail: /path/to/docs' # class PathResolver + include Logging + DOT = '.' DOT_DOT = '..' DOT_SLASH = './' SLASH = '/' BACKSLASH = '\\' DOUBLE_SLASH = '//' - WindowsRootRx = /^[a-zA-Z]:(?:\\|\/)/ + WindowsRootRx = /^(?:[a-zA-Z]:)?[\\\/]/ attr_accessor :file_separator attr_accessor :working_dir @@ -118,39 +120,52 @@ # expanded to an absolute path inside the constructor. # # file_separator - the String file separator to use for path operations - # (optional, default: File::SEPARATOR) + # (optional, default: File::ALT_SEPARATOR or File::SEPARATOR) # working_dir - the String working directory (optional, default: Dir.pwd) # def initialize file_separator = nil, working_dir = nil - @file_separator = file_separator ? file_separator : (::File::ALT_SEPARATOR || ::File::SEPARATOR) - if working_dir - @working_dir = (is_root? working_dir) ? working_dir : (::File.expand_path working_dir) - else - @working_dir = ::File.expand_path ::Dir.pwd - end + @file_separator = file_separator || ::File::ALT_SEPARATOR || ::File::SEPARATOR + @working_dir = working_dir ? ((root? working_dir) ? (posixify working_dir) : (::File.expand_path working_dir)) : ::Dir.pwd @_partition_path_sys = {} @_partition_path_web = {} end - # Public: Check if the specified path is an absolute root path - # This operation correctly handles both posix and windows paths. + # Public: Check whether the specified path is an absolute path. + # + # This operation considers both posix paths and Windows paths. The path does + # not have to be posixified beforehand. This operation does not handle URIs. + # + # Unix absolute paths start with a slash. UNC paths can start with a slash or + # backslash. Windows roots can start with a drive letter. # # path - the String path to check # # returns a Boolean indicating whether the path is an absolute root path - def is_root? path - # Unix absolute paths and UNC paths start with slash - if path.start_with? SLASH - true - # Windows roots can begin with drive letter - elsif @file_separator == BACKSLASH && WindowsRootRx =~ path - true - # Absolute paths in the browser start with file:/// - elsif ::RUBY_ENGINE_OPAL && ::JAVASCRIPT_PLATFORM == 'browser' && (path.start_with? 'file:///') - true - else - false + def absolute_path? path + (path.start_with? SLASH) || (@file_separator == BACKSLASH && (WindowsRootRx.match? path)) + end + + # Public: Check if the specified path is an absolute root path (or, in the + # browser environment, an absolute URI as well) + # + # This operation considers both posix paths and Windows paths. If the JavaScript IO + # module is xmlhttprequest, this operation also considers absolute URIs. + # + # Unix absolute paths and UNC paths start with slash. Windows roots can + # start with a drive letter. When the IO module is xmlhttprequest (Opal + # runtime only), an absolute (qualified) URI (starts with file://, http://, + # or https://) is also considered to be an absolute path. 
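A sketch of the new absolute_path? check (paths illustrative); the drive-letter form only counts as absolute when the resolver's file separator is a backslash, and root? builds on this check:

    resolver = Asciidoctor::PathResolver.new
    resolver.absolute_path? '/path/to/docs'  # => true
    resolver.absolute_path? 'images'         # => false
    Asciidoctor::PathResolver.new('\\').absolute_path? 'C:/data/docs'  # => true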
+ # + # path - the String path to check + # + # returns a Boolean indicating whether the path is an absolute root path (or + # an absolute URI when the JavaScript IO module is xmlhttprequest) + if RUBY_ENGINE == 'opal' && ::JAVASCRIPT_IO_MODULE == 'xmlhttprequest' + def root? path + (absolute_path? path) || (path.start_with? 'file://', 'http://', 'https://') end + else + alias root? absolute_path? end # Public: Determine if the path is a UNC (root) path @@ -158,7 +173,7 @@ # path - the String path to check # # returns a Boolean indicating whether the path is a UNC path - def is_unc? path + def unc? path path.start_with? DOUBLE_SLASH end @@ -167,111 +182,137 @@ # path - the String path to check # # returns a Boolean indicating whether the path is an absolute (root) web path - def is_web_root? path + def web_root? path path.start_with? SLASH end + # Public: Determine whether path descends from base. + # + # If path equals base, or base is a parent of path, return true. + # + # path - The String path to check. Can be relative. + # base - The String base path to check against. Can be relative. + # + # returns If path descends from base, return the offset, otherwise false. + def descends_from? path, base + if base == path + 0 + elsif base == SLASH + (path.start_with? SLASH) && 1 + else + (path.start_with? base + SLASH) && (base.length + 1) + end + end + + # Public: Calculate the relative path to this absolute path from the specified base directory + # + # If neither path or base are absolute paths, the path is not contained + # within the base directory, or the relative path cannot be computed, the + # original path is returned work is done. + # + # path - [String] an absolute filename. + # base - [String] an absolute base directory. + # + # Return the [String] relative path of the specified path calculated from the base directory. + def relative_path path, base + if root? path + if (offset = descends_from? path, base) + path.slice offset, path.length + else + begin + (Pathname.new path).relative_path_from(Pathname.new base).to_s + rescue + path + end + end + else + path + end + end + # Public: Normalize path by converting any backslashes to forward slashes # # path - the String path to normalize # # returns a String path with any backslashes replaced with forward slashes - def posixfy path - if path.nil_or_empty? - '' - elsif path.include? BACKSLASH - path.tr BACKSLASH, SLASH + def posixify path + if path + @file_separator == BACKSLASH && (path.include? BACKSLASH) ? (path.tr BACKSLASH, SLASH) : path else - path + '' end end + alias posixfy posixify - # Public: Expand the path by resolving any parent references (..) - # and cleaning self references (.). - # - # The result will be relative if the path is relative and - # absolute if the path is absolute. The file separator used - # in the expanded path is the one specified when the class - # was constructed. + # Public: Expand the specified path by converting the path to a posix path, resolving parent + # references (..), and removing self references (.). # # path - the String path to expand # - # returns a String path with any parent or self references resolved. + # returns a String path as a posix path with parent references resolved and self references removed. + # The result will be relative if the path is relative and absolute if the path is absolute. def expand_path path - path_segments, path_root, _ = partition_path path - join_path path_segments, path_root + path_segments, path_root = partition_path path + if path.include? 
DOT_DOT + resolved_segments = [] + path_segments.each do |segment| + segment == DOT_DOT ? resolved_segments.pop : resolved_segments << segment + end + join_path resolved_segments, path_root + else + join_path path_segments, path_root + end end - # Public: Partition the path into path segments and remove any empty segments - # or segments that are self references (.). The path is converted to a posix - # path before being partitioned. - # - # path - the String path to partition - # web_path - a Boolean indicating whether the path should be handled - # as a web path (optional, default: false) - # - # Returns a 3-item Array containing the Array of String path segments, the - # path root (e.g., '/', './', 'c:/') if the path is absolute and the posix - # version of the path. - #-- - # QUESTION is it worth it to normalize slashes? it doubles the time elapsed - def partition_path path, web_path = false - if (result = web_path ? @_partition_path_web[path] : @_partition_path_sys[path]) + # Public: Partition the path into path segments and remove self references (.) and the trailing + # slash, if present. Prior to being partitioned, the path is converted to a posix path. + # + # Parent references are not resolved by this method since the consumer often needs to handle this + # resolution in a certain context (checking for the breach of a jail, for instance). + # + # path - the String path to partition + # web - a Boolean indicating whether the path should be handled + # as a web path (optional, default: false) + # + # Returns a 2-item Array containing the Array of String path segments and the + # path root (e.g., '/', './', 'c:/', or '//'), which is nil unless the path is absolute. + def partition_path path, web = nil + if (result = (cache = web ? @_partition_path_web : @_partition_path_sys)[path]) return result end - posix_path = posixfy path + posix_path = posixify path - root = if web_path + if web # ex. /sample/path - if is_web_root? posix_path - SLASH + if web_root? posix_path + root = SLASH # ex. ./sample/path elsif posix_path.start_with? DOT_SLASH - DOT_SLASH - # ex. sample/path - else - nil + root = DOT_SLASH + # else ex. sample/path end - else - if is_root? posix_path - # ex. //sample/path - if is_unc? posix_path - DOUBLE_SLASH - # ex. /sample/path - elsif posix_path.start_with? SLASH - SLASH - # ex. c:/sample/path (or file:///sample/path in browser environment) - else - posix_path[0..(posix_path.index SLASH)] - end - # ex. ./sample/path - elsif posix_path.start_with? DOT_SLASH - DOT_SLASH - # ex. sample/path + elsif root? posix_path + # ex. //sample/path + if unc? posix_path + root = DOUBLE_SLASH + # ex. /sample/path + elsif posix_path.start_with? SLASH + root = SLASH + # ex. C:/sample/path (or file:///sample/path in browser environment) else - nil + root = posix_path.slice 0, (posix_path.index SLASH) + 1 end + # ex. ./sample/path + elsif posix_path.start_with? DOT_SLASH + root = DOT_SLASH + # else ex. sample/path end - path_segments = posix_path.split SLASH - # shift twice for a UNC path - if root == DOUBLE_SLASH - path_segments = path_segments[2..-1] - # shift twice for a file:/// path and adjust root - # NOTE technically file:/// paths work without this adjustment - #elsif ::RUBY_ENGINE_OPAL && ::JAVASCRIPT_PLATFORM == 'browser' && root == 'file:/' - # root = 'file://' - # path_segments = path_segments[2..-1] - # shift once for any other root - elsif root - path_segments.shift - end + path_segments = (root ? 
(posix_path.slice root.length, posix_path.length) : posix_path).split SLASH # strip out all dot entries path_segments.delete DOT - # QUESTION should we chomp trailing /? (we pay a small fraction) - #posix_path = posix_path.chomp '/' - (web_path ? @_partition_path_web : @_partition_path_sys)[path] = [path_segments, root, posix_path] + cache[path] = [path_segments, root] end # Public: Join the segments using the posix file separator (since Ruby knows @@ -285,117 +326,138 @@ # returns a String path formed by joining the segments using the posix file # separator and prepending the root, if specified def join_path segments, root = nil - if root - %(#{root}#{segments * SLASH}) - else - segments * SLASH - end + root ? %(#{root}#{segments.join SLASH}) : (segments.join SLASH) end - # Public: Resolve a system path from the target and start paths. If a jail - # path is specified, enforce that the resolved directory is contained within - # the jail path. If a jail path is not provided, the resolved path may be - # any location on the system. If the resolved path is absolute, use it as is. - # If the resolved path is relative, resolve it relative to the working_dir - # specified in the constructor. + # Public: Securely resolve a system path + # + # Resolve a system path from the target relative to the start path, jail path, or working + # directory (specified in the constructor), in that order. If a jail path is specified, enforce + # that the resolved path descends from the jail path. If a jail path is not provided, the resolved + # path may be any location on the system. If the resolved path is absolute, use it as is (unless + # it breaches the jail path). Expand all parent and self references in the resolved path. # # target - the String target path - # start - the String start (i.e., parent) path - # jail - the String jail path to confine the resolved path + # start - the String start path from which to resolve a relative target; falls back to jail, if + # specified, or the working directory specified in the constructor (default: nil) + # jail - the String jail path to which to confine the resolved path, if specified; must be an + # absolute path (default: nil) # opts - an optional Hash of options to control processing (default: {}): - # * :recover is used to control whether the processor should auto-recover - # when an illegal path is encountered + # * :recover is used to control whether the processor should + # automatically recover when an illegal path is encountered # * :target_name is used in messages to refer to the path being resolved # - # returns a String path that joins the target path with the start path with - # any parent references resolved and self references removed and enforces - # that the resolved path be contained within the jail, if provided - def system_path target, start, jail = nil, opts = {} + # returns a String path relative to the start path, if specified, and confined to the jail path, + # if specified. The path is posixified and all parent and self references in the path are expanded. + def system_path target, start = nil, jail = nil, opts = {} if jail - unless is_root? jail - raise ::SecurityError, %(Jail is not an absolute path: #{jail}) + raise ::SecurityError, %(Jail is not an absolute path: #{jail}) unless root? jail + #raise ::SecurityError, %(Jail is not a canonical path: #{jail}) if jail.include? DOT_DOT + jail = posixify jail + end + + if target + if root? target + target_path = expand_path target + if jail && !(descends_from? 
target_path, jail) + if opts.fetch :recover, true + logger.warn %(#{opts[:target_name] || 'path'} is outside of jail; recovering automatically) + target_segments, _ = partition_path target_path + jail_segments, jail_root = partition_path jail + return join_path jail_segments + target_segments, jail_root + else + raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} is outside of jail: #{jail} (disallowed in safe mode)) + end + end + return target_path + else + target_segments, _ = partition_path target end - jail = posixfy jail - end - - if target.nil_or_empty? - target_segments = [] else - target_segments, target_root, _ = partition_path target + target_segments = [] end if target_segments.empty? if start.nil_or_empty? - return jail ? jail : @working_dir - elsif is_root? start - unless jail + return jail || @working_dir + elsif root? start + if jail + start = posixify start + else return expand_path start end else - return system_path start, jail, jail, opts - end - end - - if target_root && target_root != DOT_SLASH - resolved_target = join_path target_segments, target_root - # if target is absolute and a sub-directory of jail, or - # a jail is not in place, let it slide - if !jail || (resolved_target.start_with? jail) - return resolved_target + target_segments, _ = partition_path start + start = jail || @working_dir end - end - - if start.nil_or_empty? - start = jail ? jail : @working_dir - elsif is_root? start - start = posixfy start + elsif start.nil_or_empty? + start = jail || @working_dir + elsif root? start + start = posixify start if jail else - start = system_path start, jail, jail, opts + #start = system_path start, jail, jail, opts + start = %(#{(jail || @working_dir).chomp '/'}/#{start}) end - # both jail and start have been posixfied at this point - if jail == start - jail_segments, jail_root, _ = partition_path jail - start_segments = jail_segments.dup - elsif jail - unless start.start_with? jail - raise ::SecurityError, %(#{opts[:target_name] || 'Start path'} #{start} is outside of jail: #{jail} (disallowed in safe mode)) + # both jail and start have been posixified at this point if jail is set + if jail && (recheck = !(descends_from? 
start, jail)) && @file_separator == BACKSLASH + start_segments, start_root = partition_path start + jail_segments, jail_root = partition_path jail + if start_root != jail_root + if opts.fetch :recover, true + logger.warn %(start path for #{opts[:target_name] || 'path'} is outside of jail root; recovering automatically) + start_segments = jail_segments + recheck = false + else + raise ::SecurityError, %(start path for #{opts[:target_name] || 'path'} #{start} refers to location outside jail root: #{jail} (disallowed in safe mode)) + end end - - start_segments, start_root, _ = partition_path start - jail_segments, jail_root, _ = partition_path jail - - # Already checked for this condition - #if start_root != jail_root - # raise ::SecurityError, %(Jail root #{jail_root} does not match root of #{opts[:target_name] || 'start path'}: #{start_root}) - #end else - start_segments, start_root, _ = partition_path start - jail_root = start_root + start_segments, jail_root = partition_path start end - resolved_segments = start_segments.dup - warned = false - target_segments.each do |segment| - if segment == DOT_DOT - if jail - if resolved_segments.length > jail_segments.length - resolved_segments.pop - elsif !(recover ||= (opts.fetch :recover, true)) - raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} refers to location outside jail: #{jail} (disallowed in safe mode)) - elsif !warned - warn %(asciidoctor: WARNING: #{opts[:target_name] || 'path'} has illegal reference to ancestor of jail, auto-recovering) - warned = true + if (resolved_segments = start_segments + target_segments).include? DOT_DOT + unresolved_segments, resolved_segments = resolved_segments, [] + if jail + jail_segments, _ = partition_path jail unless jail_segments + warned = false + unresolved_segments.each do |segment| + if segment == DOT_DOT + if resolved_segments.size > jail_segments.size + resolved_segments.pop + elsif opts.fetch :recover, true + unless warned + logger.warn %(#{opts[:target_name] || 'path'} has illegal reference to ancestor of jail; recovering automatically) + warned = true + end + else + raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} refers to location outside jail: #{jail} (disallowed in safe mode)) + end + else + resolved_segments << segment end - else - resolved_segments.pop end else - resolved_segments.push segment + unresolved_segments.each do |segment| + segment == DOT_DOT ? resolved_segments.pop : resolved_segments << segment + end end end - join_path resolved_segments, jail_root + if recheck + target_path = join_path resolved_segments, jail_root + if descends_from? target_path, jail + target_path + elsif opts.fetch :recover, true + logger.warn %(#{opts[:target_name] || 'path'} is outside of jail; recovering automatically) + jail_segments, _ = partition_path jail unless jail_segments + join_path jail_segments + target_segments, jail_root + else + raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} is outside of jail: #{jail} (disallowed in safe mode)) + end + else + join_path resolved_segments, jail_root + end end # Public: Resolve a web path from the target and start paths. @@ -412,30 +474,20 @@ # start path with any parent references resolved and self # references removed def web_path target, start = nil - target = posixfy target - start = posixfy start - uri_prefix = nil - - unless start.nil_or_empty? || (is_web_root? 
target) - target = %(#{start.chomp '/'}#{SLASH}#{target}) - if (uri_prefix = Helpers.uri_prefix target) - target = target[uri_prefix.length..-1] - end + target = posixify target + start = posixify start + + unless start.nil_or_empty? || (web_root? target) + target, uri_prefix = extract_uri_prefix %(#{start}#{(start.end_with? SLASH) ? '' : SLASH}#{target}) end # use this logic instead if we want to normalize target if it contains a URI - #unless is_web_root? target - # if preserve_uri_target && (uri_prefix = Helpers.uri_prefix target) - # target = target[uri_prefix.length..-1] - # elsif !start.nil_or_empty? - # target = %(#{start}#{SLASH}#{target}) - # if (uri_prefix = Helpers.uri_prefix target) - # target = target[uri_prefix.length..-1] - # end - # end + #unless web_root? target + # target, uri_prefix = extract_uri_prefix target if preserve_uri_target + # target, uri_prefix = extract_uri_prefix %(#{start}#{SLASH}#{target}) unless uri_prefix || start.nil_or_empty? #end - target_segments, target_root, _ = partition_path target, true + target_segments, target_root = partition_path target, true resolved_segments = [] target_segments.each do |segment| if segment == DOT_DOT @@ -453,27 +505,28 @@ end end - if uri_prefix - %(#{uri_prefix}#{join_path resolved_segments, target_root}) - else - join_path resolved_segments, target_root + if (resolved_path = join_path resolved_segments, target_root).include? ' ' + resolved_path = resolved_path.gsub ' ', '%20' end + + uri_prefix ? %(#{uri_prefix}#{resolved_path}) : resolved_path end - # Public: Calculate the relative path to this absolute filename from the specified base directory + private + + # Internal: Efficiently extracts the URI prefix from the specified String if the String is a URI # - # If either the filename or the base_directory are not absolute paths, no work is done. + # Uses the Asciidoctor::UriSniffRx regex to match the URI prefix in the specified String (e.g., http://). If present, + # the prefix is removed. # - # filename - An absolute file name as a String - # base_directory - An absolute base directory as a String + # str - the String to check # - # Return the relative path String of the filename calculated from the base directory - def relative_path filename, base_directory - if (is_root? filename) && (is_root? base_directory) - offset = base_directory.chomp(@file_separator).length + 1 - filename[offset..-1] + # returns a tuple containing the specified string without the URI prefix, if present, and the extracted URI prefix. + def extract_uri_prefix str + if (str.include? 
':') && UriSniffRx =~ str
+ [(str.slice $&.length, str.length), $&]
else
- filename
+ str
end
end
end
diff -Nru asciidoctor-1.5.5/lib/asciidoctor/reader.rb asciidoctor-2.0.10/lib/asciidoctor/reader.rb
--- asciidoctor-1.5.5/lib/asciidoctor/reader.rb 2016-10-05 08:51:24.000000000 +0000
+++ asciidoctor-2.0.10/lib/asciidoctor/reader.rb 2019-08-18 16:11:54.000000000 +0000
@@ -1,25 +1,25 @@
-# encoding: UTF-8
+# frozen_string_literal: true
module Asciidoctor
# Public: Methods for retrieving lines from AsciiDoc source files
class Reader
+ include Logging
+
class Cursor
- attr_accessor :file
- attr_accessor :dir
- attr_accessor :path
- attr_accessor :lineno
-
- def initialize file, dir = nil, path = nil, lineno = nil
- @file = file
- @dir = dir
- @path = path
- @lineno = lineno
+ attr_reader :file, :dir, :path, :lineno
+
+ def initialize file, dir = nil, path = nil, lineno = 1
+ @file, @dir, @path, @lineno = file, dir, path, lineno
+ end
+
+ def advance num
+ @lineno += num
end
def line_info
- %(#{path}: line #{lineno})
+ %(#{@path}: line #{@lineno})
end
- alias :to_s :line_info
+ alias to_s line_info
end
attr_reader :file
@@ -35,10 +35,14 @@
# Public: Control whether lines are processed using Reader#process_line on first visit (default: true)
attr_accessor :process_lines
+ # Public: Indicates that the end of the reader was reached with a delimited block still open.
+ attr_accessor :unterminated
+
# Public: Initialize the Reader object
- def initialize data = nil, cursor = nil, opts = {:normalize => false}
+ def initialize data = nil, cursor = nil, opts = {}
if !cursor
- @file = @dir = nil
+ @file = nil
+ @dir = '.'
@path = ''
@lineno = 1 # IMPORTANT lineno assignment must precede prepare_lines call!
elsif ::String === cursor
@@ -46,71 +50,23 @@
@dir, @path = ::File.split @file
@lineno = 1 # IMPORTANT lineno assignment must precede prepare_lines call!
else
- @file = cursor.file
- @dir = cursor.dir
- @path = cursor.path || ''
- if @file
- unless @dir
- # REVIEW might to look at this assignment closer
- @dir = ::File.dirname @file
- @dir = nil if @dir == '.' # right?
- end
-
- unless cursor.path
- @path = ::File.basename @file
- end
+ if (@file = cursor.file)
+ @dir = cursor.dir || (::File.dirname @file)
+ @path = cursor.path || (::File.basename @file)
+ else
+ @dir = cursor.dir || '.'
+ @path = cursor.path || ''
end
@lineno = cursor.lineno || 1 # IMPORTANT lineno assignment must precede prepare_lines call!
end
- @lines = data ? (prepare_lines data, opts) : []
- @source_lines = @lines.dup
- @eof = @lines.empty?
+ @lines = prepare_lines data, opts
+ @source_lines = @lines.drop 0
+ @mark = nil
@look_ahead = 0
@process_lines = true
@unescape_next_line = false
- end
-
- # Internal: Prepare the lines from the provided data
- #
- # This method strips whitespace from the end of every line of
- # the source data and appends a LF (i.e., Unix endline). This
- # whitespace substitution is very important to how Asciidoctor
- # works.
- #
- # Any leading or trailing blank lines are also removed. 
- # - # data - A String Array of input data to be normalized - # opts - A Hash of options to control what cleansing is done - # - # Returns The String lines extracted from the data - def prepare_lines data, opts = {} - if ::String === data - if opts[:normalize] - Helpers.normalize_lines_from_string data - else - data.split EOL - end - else - if opts[:normalize] - Helpers.normalize_lines_array data - else - data.dup - end - end - end - - # Internal: Processes a previously unvisited line - # - # By default, this method marks the line as processed - # by incrementing the look_ahead counter and returns - # the line unmodified. - # - # Returns The String line the Reader should make available to the next - # invocation of Reader#read_line or nil if the Reader should drop the line, - # advance to the next line and process it. - def process_line line - @look_ahead += 1 if @process_lines - line + @unterminated = nil + @saved = nil end # Public: Check whether there are any lines left to read. @@ -121,8 +77,26 @@ # # Returns True if there are more lines, False if there are not. def has_more_lines? - !(@eof || (@eof = peek_line.nil?)) + if @lines.empty? + @look_ahead = 0 + false + else + true + end + end + + # Public: Check whether this reader is empty (contains no lines) + # + # Returns true if there are no more lines to peek, otherwise false. + def empty? + if @lines.empty? + @look_ahead = 0 + true + else + false + end end + alias eof? empty? # Public: Peek at the next line and check if it's empty (i.e., whitespace only) # @@ -133,16 +107,19 @@ peek_line.nil_or_empty? end - # Public: Peek at the next line of source data. Processes the line, if not + # Public: Peek at the next line of source data. Processes the line if not # already marked as processed, but does not consume it. # # This method will probe the reader for more lines. If there is a next line # that has not previously been visited, the line is passed to the # Reader#process_line method to be initialized. This call gives - # sub-classess the opportunity to do preprocessing. If the return value of + # sub-classes the opportunity to do preprocessing. If the return value of # the Reader#process_line is nil, the data is assumed to be changed and # Reader#peek_line is invoked again to perform further processing. # + # If has_more_lines? is called immediately before peek_line, the direct flag + # is implicitly true (since the line is flagged as visited). + # # direct - A Boolean flag to bypasses the check for more lines and immediately # returns the first element of the internal @lines Array. (default: false) # @@ -150,24 +127,19 @@ # Returns nothing if there is no more data. def peek_line direct = false if direct || @look_ahead > 0 - @unescape_next_line ? @lines[0][1..-1] : @lines[0] - elsif @eof || @lines.empty? - @eof = true + @unescape_next_line ? ((line = @lines[0]).slice 1, line.length) : @lines[0] + elsif @lines.empty? @look_ahead = 0 nil else # FIXME the problem with this approach is that we aren't # retaining the modified line (hence the @unescape_next_line tweak) - # perhaps we need a stack of proxy lines - if !(line = process_line @lines[0]) - peek_line - else - line - end + # perhaps we need a stack of proxied lines + (line = process_line @lines[0]) ? line : peek_line end end - # Public: Peek at the next multiple lines of source data. Processes the lines, if not + # Public: Peek at the next multiple lines of source data. Processes the lines if not # already marked as processed, but does not consume them. 
# # This method delegates to Reader#read_line to process and collect the line, then @@ -175,24 +147,25 @@ # be processed and marked as such so that subsequent reads will not need to process # the lines again. # - # num - The Integer number of lines to peek. - # direct - A Boolean indicating whether processing should be disabled when reading lines + # num - The positive Integer number of lines to peek or nil to peek all lines (default: nil). + # direct - A Boolean indicating whether processing should be disabled when reading lines (default: false). # # Returns A String Array of the next multiple lines of source data, or an empty Array # if there are no more lines in this Reader. - def peek_lines num = 1, direct = true + def peek_lines num = nil, direct = false old_look_ahead = @look_ahead result = [] - num.times do - if (line = read_line direct) + (num || MAX_INT).times do + if (line = direct ? shift : read_line) result << line else + @lineno -= 1 if direct break end end unless result.empty? - result.reverse_each {|line| unshift line } + unshift_all result @look_ahead = old_look_ahead if direct end @@ -201,17 +174,11 @@ # Public: Get the next line of source data. Consumes the line returned. # - # direct - A Boolean flag to bypasses the check for more lines and immediately - # returns the first element of the internal @lines Array. (default: false) - # # Returns the String of the next line of the source data if data is present. # Returns nothing if there is no more data. - def read_line direct = false - if direct || @look_ahead > 0 || has_more_lines? - shift - else - nil - end + def read_line + # has_more_lines? triggers preprocessor + shift if @look_ahead > 0 || has_more_lines? end # Public: Get the remaining lines of source data. @@ -224,12 +191,13 @@ # Returns the lines read as a String Array def read_lines lines = [] + # has_more_lines? triggers preprocessor while has_more_lines? lines << shift end lines end - alias :readlines :read_lines + alias readlines read_lines # Public: Get the remaining lines of source data joined as a String. # @@ -237,23 +205,22 @@ # # Returns the lines read joined as a String def read - read_lines * EOL + read_lines.join LF end # Public: Advance to the next line by discarding the line at the front of the stack # - # direct - A Boolean flag to bypasses the check for more lines and immediately - # returns the first element of the internal @lines Array. (default: true) - # # Returns a Boolean indicating whether there was a line to discard. - def advance direct = true - !!read_line(direct) + def advance + shift ? true : false end # Public: Push the String line onto the beginning of the Array of source data. # - # Since this line was (assumed to be) previously retrieved through the - # reader, it is marked as seen. + # A line pushed on the reader using this method is not processed again. The + # method assumes the line was previously retrieved from the reader or does + # not otherwise contain preprocessor directives. Therefore, it is marked as + # processed immediately. # # line_to_restore - the line to restore onto the stack # @@ -262,20 +229,21 @@ unshift line_to_restore nil end - alias :restore_line :unshift_line + alias restore_line unshift_line # Public: Push an Array of lines onto the front of the Array of source data. # - # Since these lines were (assumed to be) previously retrieved through the - # reader, they are marked as seen. + # Lines pushed on the reader using this method are not processed again. 
The + # method assumes the lines were previously retrieved from the reader or do + # not otherwise contain preprocessor directives. Therefore, they are marked + # as processed immediately. # # Returns nothing. def unshift_lines lines_to_restore - # QUESTION is it faster to use unshift(*lines_to_restore)? - lines_to_restore.reverse_each {|line| unshift line } + unshift_all lines_to_restore nil end - alias :restore_lines :unshift_lines + alias restore_lines unshift_lines # Public: Replace the next line with the specified line. # @@ -285,88 +253,88 @@ # # replacement - The String line to put in place of the next line (i.e., the line at the cursor). # - # Returns nothing. + # Returns true. def replace_next_line replacement - advance + shift unshift replacement - nil + true end # deprecated - alias :replace_line :replace_next_line + alias replace_line replace_next_line - # Public: Strip off leading blank lines in the Array of lines. + # Public: Skip blank lines at the cursor. # # Examples # - # @lines + # reader.lines # => ["", "", "Foo", "Bar", ""] - # - # skip_blank_lines + # reader.skip_blank_lines # => 2 - # - # @lines + # reader.lines # => ["Foo", "Bar", ""] # - # Returns an Integer of the number of lines skipped + # Returns the [Integer] number of lines skipped or nothing if all lines have + # been consumed (even if lines were skipped by this method). def skip_blank_lines - return 0 if eof? + return if empty? num_skipped = 0 # optimized code for shortest execution path while (next_line = peek_line) if next_line.empty? - advance + shift num_skipped += 1 else return num_skipped end end - - num_skipped end - # Public: Skip consecutive lines containing line comments and return them. + # Public: Skip consecutive comment lines and block comments. # # Examples # @lines # => ["// foo", "bar"] # # comment_lines = skip_comment_lines - # => ["// foo"] + # => nil # # @lines # => ["bar"] # - # Returns the Array of lines that were skipped - def skip_comment_lines opts = {} - return [] if eof? - - comment_lines = [] - include_blank_lines = opts[:include_blank_lines] - while (next_line = peek_line) - if include_blank_lines && next_line.empty? - comment_lines << shift - elsif (commentish = next_line.start_with?('//')) && (match = CommentBlockRx.match(next_line)) - comment_lines << shift - comment_lines.push(*(read_lines_until(:terminator => match[0], :read_last_line => true, :skip_processing => true))) - elsif commentish && CommentLineRx =~ next_line - comment_lines << shift + # Returns nothing + def skip_comment_lines + return if empty? + + while (next_line = peek_line) && !next_line.empty? + if next_line.start_with? '//' + if next_line.start_with? '///' + if (ll = next_line.length) > 3 && next_line == '/' * ll + read_lines_until terminator: next_line, skip_first_line: true, read_last_line: true, skip_processing: true, context: :comment + else + break + end + else + shift + end else break end end - comment_lines + nil end - # Public: Skip consecutive lines that are line comments and return them. + # Public: Skip consecutive comment lines and return them. + # + # This method assumes the reader only contains simple lines (no blocks). def skip_line_comments - return [] if eof? + return [] if empty? comment_lines = [] # optimized code for shortest execution path - while (next_line = peek_line) - if CommentLineRx =~ next_line + while (next_line = peek_line) && !next_line.empty? + if (next_line.start_with? 
'//') comment_lines << shift else break @@ -382,26 +350,21 @@ def terminate @lineno += @lines.size @lines.clear - @eof = true @look_ahead = 0 nil end - # Public: Check whether this reader is empty (contains no lines) - # - # Returns true if there are no more lines to peek, otherwise false. - def eof? - !has_more_lines? - end - alias :empty? :eof? - # Public: Return all the lines from `@lines` until we (1) run out them, - # (2) find a blank line with :break_on_blank_lines => true, or (3) find + # (2) find a blank line with `break_on_blank_lines: true`, or (3) find # a line for which the given block evals to true. # # options - an optional Hash of processing options: + # * :terminator may be used to specify the contents of the line + # at which the reader should stop # * :break_on_blank_lines may be used to specify to break on # blank lines + # * :break_on_list_continuation may be used to specify to break + # on a list continuation line # * :skip_first_line may be used to tell the reader to advance # beyond the first line before beginning the scan # * :preserve_last_line may be used to specify that the String @@ -410,6 +373,10 @@ # * :read_last_line may be used to specify that the String # causing the method to stop processing lines should be # included in the lines being returned + # * :skip_line_comments may be used to look for and skip + # line comments + # * :skip_processing is used to disable line (pre)processing + # for the duration of this method # # Returns the Array of lines forming the next segment. # @@ -421,21 +388,18 @@ # "\n", # "Third line\n", # ] - # reader = Reader.new data, nil, :normalize => true + # reader = Reader.new data, nil, normalize: true # # reader.read_lines_until # => ["First line", "Second line"] def read_lines_until options = {} result = [] - advance if options[:skip_first_line] if @process_lines && options[:skip_processing] @process_lines = false restore_process_lines = true - else - restore_process_lines = false end - if (terminator = options[:terminator]) + start_cursor = options[:cursor] || cursor break_on_blank_lines = false break_on_list_continuation = false else @@ -443,10 +407,8 @@ break_on_list_continuation = options[:break_on_list_continuation] end skip_comments = options[:skip_line_comments] - line_read = false - line_restored = false - - complete = false + complete = line_read = line_restored = nil + shift if options[:skip_first_line] while !complete && (line = read_line) complete = while true break true if terminator && line == terminator @@ -459,7 +421,6 @@ break true if block_given? && (yield line) break false end - if complete if options[:read_last_line] result << line @@ -470,17 +431,21 @@ line_restored = true end else - unless skip_comments && line.start_with?('//') && CommentLineRx =~ line + unless skip_comments && (line.start_with? '//') && !(line.start_with? '///') result << line line_read = true end end end - if restore_process_lines @process_lines = true @look_ahead -= 1 if line_restored && !terminator end + if terminator && terminator != line && (context = options.fetch :context, terminator) + start_cursor = cursor_at_mark if start_cursor == :at_mark + logger.warn message_with_context %(unterminated #{context} block), source_location: start_cursor + @unterminated = true + end result end @@ -488,6 +453,7 @@ # # This method can be used directly when you've already called peek_line # and determined that you do, in fact, want to pluck that line off the stack. + # Use read_line if the line hasn't (or many not have been) visited yet. 
# # Returns The String line at the top of the stack def shift @@ -500,49 +466,144 @@ def unshift line @lineno -= 1 @look_ahead += 1 - @eof = false @lines.unshift line end + # Internal: Restore the lines to the stack and decrement the lineno + def unshift_all lines + @lineno -= lines.size + @look_ahead += lines.size + @lines.unshift(*lines) + end + def cursor Cursor.new @file, @dir, @path, @lineno end + def cursor_at_line lineno + Cursor.new @file, @dir, @path, lineno + end + + def cursor_at_mark + @mark ? Cursor.new(*@mark) : cursor + end + + def cursor_before_mark + if @mark + m_file, m_dir, m_path, m_lineno = @mark + Cursor.new m_file, m_dir, m_path, m_lineno - 1 + else + Cursor.new @file, @dir, @path, @lineno - 1 + end + end + + def cursor_at_prev_line + Cursor.new @file, @dir, @path, @lineno - 1 + end + + def mark + @mark = @file, @dir, @path, @lineno + end + # Public: Get information about the last line read, including file name and line number. # # Returns A String summary of the last line read def line_info %(#{@path}: line #{@lineno}) end - alias :next_line_info :line_info - - def prev_line_info - %(#{@path}: line #{@lineno - 1}) - end # Public: Get a copy of the remaining Array of String lines managed by this Reader # # Returns A copy of the String Array of lines remaining in this Reader def lines - @lines.dup + @lines.drop 0 end # Public: Get a copy of the remaining lines managed by this Reader joined as a String def string - @lines * EOL + @lines.join LF end # Public: Get the source lines for this Reader joined as a String def source - @source_lines * EOL + @source_lines.join LF + end + + # Internal: Save the state of the reader at cursor + def save + @saved = {}.tap do |accum| + instance_variables.each do |name| + unless name == :@saved || name == :@source_lines + accum[name] = ::Array === (val = instance_variable_get name) ? (val.drop 0) : val + end + end + end + nil end - # Public: Get a summary of this Reader. + # Internal: Restore the state of the reader at cursor + def restore_save + if @saved + @saved.each do |name, val| + instance_variable_set name, val + end + @saved = nil + end + end + + # Internal: Discard a previous saved state + def discard_save + @saved = nil + end + + def to_s + %(#<#{self.class}@#{object_id} {path: #{@path.inspect}, line: #{@lineno}}>) + end + + private + + # Internal: Prepare the source data for parsing. # + # Converts the source data into an Array of lines ready for parsing. If the +:normalize+ option is set, this method + # coerces the encoding of each line to UTF-8 and strips trailing whitespace, including the newline. (This whitespace + # cleaning is very important to how Asciidoctor works). Subclasses may choose to perform additional preparation. # - # Returns A string summary of this reader, which contains the path and line information - def to_s - line_info + # data - A String Array or String of source data to be normalized. + # opts - A Hash of options to control how lines are prepared. + # :normalize - Enables line normalization, which coerces the encoding to UTF-8 and removes trailing whitespace + # (optional, default: false). + # + # Returns A String Array of source lines. If the source data is an Array, this method returns a copy. + def prepare_lines data, opts = {} + if opts[:normalize] + ::Array === data ? (Helpers.prepare_source_array data) : (Helpers.prepare_source_string data) + elsif ::Array === data + data.drop 0 + elsif data + data.split LF, -1 + else + [] + end + rescue + if (::Array === data ? 
data.join : data.to_s).valid_encoding? + raise + else + raise ::ArgumentError, 'source is either binary or contains invalid Unicode data' + end + end + + # Internal: Processes a previously unvisited line + # + # By default, this method marks the line as processed + # by incrementing the look_ahead counter and returns + # the line unmodified. + # + # Returns The String line the Reader should make available to the next + # invocation of Reader#read_line or nil if the Reader should drop the line, + # advance to the next line and process it. + def process_line line + @look_ahead += 1 if @process_lines + line end end @@ -550,30 +611,199 @@ # directives as each line is read off the Array of lines. class PreprocessorReader < Reader attr_reader :include_stack - attr_reader :includes # Public: Initialize the PreprocessorReader object - def initialize document, data = nil, cursor = nil + def initialize document, data = nil, cursor = nil, opts = {} @document = document - super data, cursor, :normalize => true - include_depth_default = document.attributes.fetch('max-include-depth', 64).to_i - include_depth_default = 0 if include_depth_default < 0 - # track both absolute depth for comparing to size of include stack and relative depth for reporting - @maxdepth = {:abs => include_depth_default, :rel => include_depth_default} + super data, cursor, opts + if (default_include_depth = (document.attributes['max-include-depth'] || 64).to_i) > 0 + # track absolute max depth, current max depth for comparing to include stack size, and relative max depth for reporting + @maxdepth = { abs: default_include_depth, curr: default_include_depth, rel: default_include_depth } + else + # if @maxdepth is not set, built-in include functionality is disabled + @maxdepth = nil + end @include_stack = [] - @includes = (document.references[:includes] ||= []) + @includes = document.catalog[:includes] @skipping = false @conditional_stack = [] @include_processor_extensions = nil end + # (see Reader#has_more_lines?) + def has_more_lines? + peek_line ? true : false + end + + # (see Reader#empty?) + def empty? + peek_line ? false : true + end + alias eof? empty? + + # Public: Override the Reader#peek_line method to pop the include + # stack if the last line has been reached and there's at least + # one include on the stack. + # + # Returns the next line of the source data as a String if there are lines remaining + # in the current include context or a parent include context. + # Returns nothing if there are no more lines remaining and the include stack is empty. + def peek_line direct = false + if (line = super) + line + elsif @include_stack.empty? + nil + else + pop_include + peek_line direct + end + end + + # Public: Push source onto the front of the reader and switch the context + # based on the file, document-relative path and line information given. + # + # This method is typically used in an IncludeProcessor to add source + # read from the target specified. + # + # Examples + # + # path = 'partial.adoc' + # file = File.expand_path path + # data = File.read file + # reader.push_include data, file, path + # + # Returns this Reader object. 
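
The push_include method defined just below is also the hook that IncludeProcessor extensions use to feed resolved content back into the reader. As a rough sketch only (not taken from this patch; the 'snippet:' prefix, class name, and file handling are invented for illustration), an include processor registered through the standard extension API might look like this:

require 'asciidoctor'

# Hypothetical processor that claims targets starting with 'snippet:' and
# hands the file content to the reader via push_include.
class SnippetIncludeProcessor < Asciidoctor::Extensions::IncludeProcessor
  def handles? target
    target.start_with? 'snippet:'
  end

  def process doc, reader, target, attributes
    path = target.sub 'snippet:', ''
    data = File.read path
    # push_include switches the reader context (file, path, lineno) onto the
    # include stack and queues the prepared lines for parsing
    reader.push_include data, path, path, 1, attributes
  end
end

Asciidoctor::Extensions.register do
  include_processor SnippetIncludeProcessor
end

When handles? returns false, the target falls through to the built-in include resolution implemented later in this file.
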
+ def push_include data, file = nil, path = nil, lineno = 1, attributes = {} + @include_stack << [@lines, @file, @dir, @path, @lineno, @maxdepth, @process_lines] + if (@file = file) + # NOTE if file is not a string, assume it's a URI + if ::String === file + @dir = ::File.dirname file + elsif RUBY_ENGINE_OPAL + @dir = ::URI.parse ::File.dirname(file = file.to_s) + else + # NOTE this intentionally throws an error if URI has no path + (@dir = file.dup).path = (dir = ::File.dirname file.path) == '/' ? '' : dir + file = file.to_s + end + @path = (path ||= ::File.basename file) + # only process lines in AsciiDoc files + if (@process_lines = file.end_with?(*ASCIIDOC_EXTENSIONS.keys)) + @includes[path.slice 0, (path.rindex '.')] = attributes['partial-option'] ? nil : true + end + else + @dir = '.' + # we don't know what file type we have, so assume AsciiDoc + @process_lines = true + if (@path = path) + @includes[Helpers.rootname path] = attributes['partial-option'] ? nil : true + else + @path = '' + end + end + + @lineno = lineno + + if @maxdepth && (attributes.key? 'depth') + if (rel_maxdepth = attributes['depth'].to_i) > 0 + if (curr_maxdepth = @include_stack.size + rel_maxdepth) > (abs_maxdepth = @maxdepth[:abs]) + # if relative depth exceeds absolute max depth, effectively ignore relative depth request + curr_maxdepth = rel_maxdepth = abs_maxdepth + end + @maxdepth = { abs: abs_maxdepth, curr: curr_maxdepth, rel: rel_maxdepth } + else + @maxdepth = { abs: @maxdepth[:abs], curr: @include_stack.size, rel: 0 } + end + end + + # effectively fill the buffer + if (@lines = prepare_lines data, normalize: true, condense: false, indent: attributes['indent']).empty? + pop_include + else + # FIXME we eventually want to handle leveloffset without affecting the lines + if attributes.key? 'leveloffset' + @lines.unshift '' + @lines.unshift %(:leveloffset: #{attributes['leveloffset']}) + @lines << '' + if (old_leveloffset = @document.attr 'leveloffset') + @lines << %(:leveloffset: #{old_leveloffset}) + else + @lines << ':leveloffset!:' + end + # compensate for these extra lines + @lineno -= 2 + end + + # FIXME kind of a hack + #Document::AttributeEntry.new('infile', @file).save_to_next_block @document + #Document::AttributeEntry.new('indir', @dir).save_to_next_block @document + @look_ahead = 0 + end + self + end + + def include_depth + @include_stack.size + end + + # Public: Reports whether pushing an include on the include stack exceeds the max include depth. + # + # Returns nil if no max depth is set and includes are disabled (max-include-depth=0), false if the current max depth + # will not be exceeded, and the relative max include depth if the current max depth will be exceed. + def exceeds_max_depth? + @maxdepth && @include_stack.size >= @maxdepth[:curr] && @maxdepth[:rel] + end + alias exceeded_max_depth? exceeds_max_depth? + + # TODO Document this override + # also, we now have the field in the super class, so perhaps + # just implement the logic there? + def shift + if @unescape_next_line + @unescape_next_line = false + (line = super).slice 1, line.length + else + super + end + end + + def include_processors? + if @include_processor_extensions.nil? + if @document.extensions? && @document.extensions.include_processors? 
+ !!(@include_processor_extensions = @document.extensions.include_processors) + else + @include_processor_extensions = false + end + else + @include_processor_extensions != false + end + end + + def create_include_cursor file, path, lineno + if ::String === file + dir = ::File.dirname file + elsif RUBY_ENGINE_OPAL + dir = ::File.dirname(file = file.to_s) + else + dir = (dir = ::File.dirname file.path) == '' ? '/' : dir + file = file.to_s + end + Cursor.new file, dir, path, lineno + end + + def to_s + %(#<#{self.class}@#{object_id} {path: #{@path.inspect}, line: #{@lineno}, include depth: #{@include_stack.size}, include stack: [#{@include_stack.map {|inc| inc.to_s }.join ', '}]}>) + end + + private + def prepare_lines data, opts = {} result = super # QUESTION should this work for AsciiDoc table cell content? Currently it does not. - if @document && (@document.attributes.has_key? 'skip-front-matter') + if @document && @document.attributes['skip-front-matter'] if (front_matter = skip_front_matter! result) - @document.attributes['front-matter'] = front_matter * EOL + @document.attributes['front-matter'] = front_matter.join LF end end @@ -582,9 +812,7 @@ result.pop while (last = result[-1]) && last.empty? end - if opts[:indent] - Parser.adjust_indentation! result, opts[:indent], (@document.attr 'tabsize') - end + Parser.adjust_indentation! result, opts[:indent].to_i, (@document.attr 'tabsize').to_i if opts[:indent] result end @@ -594,50 +822,46 @@ if line.empty? @look_ahead += 1 - return '' + return line end # NOTE highly optimized if line.end_with?(']') && !line.start_with?('[') && line.include?('::') - if line.include?('if') && (match = ConditionalDirectiveRx.match(line)) + if (line.include? 'if') && ConditionalDirectiveRx =~ line # if escaped, mark as processed and return line unescaped - if line.start_with?('\\') + if $1 == '\\' @unescape_next_line = true @look_ahead += 1 - line[1..-1] + line.slice 1, line.length + elsif preprocess_conditional_directive $2, $3, $4, $5 + # move the pointer past the conditional line + shift + # treat next line as uncharted territory + nil else - if preprocess_conditional_inclusion(*match.captures) - # move the pointer past the conditional line - advance - # treat next line as uncharted territory - nil - else - # the line was not a valid conditional line - # mark it as visited and return it - @look_ahead += 1 - line - end + # the line was not a valid conditional line + # mark it as visited and return it + @look_ahead += 1 + line end elsif @skipping - advance + shift nil - elsif ((escaped = line.start_with?('\\include::')) || line.start_with?('include::')) && (match = IncludeDirectiveRx.match(line)) + elsif (line.start_with? 'inc', '\\inc') && IncludeDirectiveRx =~ line # if escaped, mark as processed and return line unescaped - if escaped + if $1 == '\\' @unescape_next_line = true @look_ahead += 1 - line[1..-1] + line.slice 1, line.length + # QUESTION should we strip whitespace from raw attributes in Substitutors#parse_attributes? (check perf) + elsif preprocess_include_directive $2, $3 + # peek again since the content has changed + nil else - # QUESTION should we strip whitespace from raw attributes in Substitutors#parse_attributes? 
(check perf) - if preprocess_include match[1], match[2].strip - # peek again since the content has changed - nil - else - # the line was not a valid include line and is unchanged - # mark it as visited and return it - @look_ahead += 1 - line - end + # the line was not a valid include line and is unchanged + # mark it as visited and return it + @look_ahead += 1 + line end else # NOTE optimization to inline super @@ -645,7 +869,7 @@ line end elsif @skipping - advance + shift nil else # NOTE optimization to inline super @@ -654,141 +878,119 @@ end end - # Public: Override the Reader#peek_line method to pop the include - # stack if the last line has been reached and there's at least - # one include on the stack. + # Internal: Preprocess the directive to conditionally include or exclude content. # - # Returns the next line of the source data as a String if there are lines remaining - # in the current include context or a parent include context. - # Returns nothing if there are no more lines remaining and the include stack is empty. - def peek_line direct = false - if (line = super) - line - elsif @include_stack.empty? - nil - else - pop_include - peek_line direct - end - end - - # Internal: Preprocess the directive (macro) to conditionally include content. - # - # Preprocess the conditional inclusion directive (ifdef, ifndef, ifeval, - # endif) under the cursor. If the Reader is currently skipping content, then - # simply track the open and close delimiters of any nested conditional - # blocks. If the Reader is not skipping, mark whether the condition is - # satisfied and continue preprocessing recursively until the next line of - # available content is found. - # - # directive - The conditional inclusion directive (ifdef, ifndef, ifeval, endif) - # target - The target, which is the name of one or more attributes that are - # used in the condition (blank in the case of the ifeval directive) - # delimiter - The conditional delimiter for multiple attributes ('+' means all - # attributes must be defined or undefined, ',' means any of the attributes - # can be defined or undefined. - # text - The text associated with this directive (occurring between the square brackets) - # Used for a single-line conditional block in the case of the ifdef or - # ifndef directives, and for the conditional expression for the ifeval directive. + # Preprocess the conditional directive (ifdef, ifndef, ifeval, endif) under + # the cursor. If Reader is currently skipping content, then simply track the + # open and close delimiters of any nested conditional blocks. If Reader is + # not skipping, mark whether the condition is satisfied and continue + # preprocessing recursively until the next line of available content is + # found. + # + # keyword - The conditional inclusion directive (ifdef, ifndef, ifeval, endif) + # target - The target, which is the name of one or more attributes that are + # used in the condition (blank in the case of the ifeval directive) + # delimiter - The conditional delimiter for multiple attributes ('+' means all + # attributes must be defined or undefined, ',' means any of the attributes + # can be defined or undefined. + # text - The text associated with this directive (occurring between the square brackets) + # Used for a single-line conditional block in the case of the ifdef or + # ifndef directives, and for the conditional expression for the ifeval directive. 
# # Returns a Boolean indicating whether the cursor should be advanced - def preprocess_conditional_inclusion directive, target, delimiter, text - # must have a target before brackets if ifdef or ifndef - # must not have text between brackets if endif - # don't honor match if it doesn't meet this criteria - # QUESTION should we warn for these bogus declarations? - if ((directive == 'ifdef' || directive == 'ifndef') && target.empty?) || - (directive == 'endif' && text) - return false - end - + def preprocess_conditional_directive keyword, target, delimiter, text # attributes are case insensitive - target = target.downcase + target = target.downcase unless (no_target = target.empty?) - if directive == 'endif' - stack_size = @conditional_stack.size - if stack_size > 0 - pair = @conditional_stack[-1] - if target.empty? || target == pair[:target] - @conditional_stack.pop - @skipping = @conditional_stack.empty? ? false : @conditional_stack[-1][:skipping] - else - warn %(asciidoctor: ERROR: #{line_info}: mismatched macro: endif::#{target}[], expected endif::#{pair[:target]}[]) - end + if keyword == 'endif' + if text + logger.error message_with_context %(malformed preprocessor directive - text not permitted: endif::#{target}[#{text}]), source_location: cursor + elsif @conditional_stack.empty? + logger.error message_with_context %(unmatched preprocessor directive: endif::#{target}[]), source_location: cursor + elsif no_target || target == (pair = @conditional_stack[-1])[:target] + @conditional_stack.pop + @skipping = @conditional_stack.empty? ? false : @conditional_stack[-1][:skipping] else - warn %(asciidoctor: ERROR: #{line_info}: unmatched macro: endif::#{target}[]) + logger.error message_with_context %(mismatched preprocessor directive: endif::#{target}[], expected endif::#{pair[:target]}[]), source_location: cursor end return true - end - - skip = false - unless @skipping + elsif @skipping + skip = false + else # QUESTION any way to wrap ifdef & ifndef logic up together? - case directive + case keyword when 'ifdef' + if no_target + logger.error message_with_context %(malformed preprocessor directive - missing target: ifdef::[#{text}]), source_location: cursor + return true + end case delimiter - when nil - # if the attribute is undefined, then skip - skip = !@document.attributes.has_key?(target) when ',' - # if any attribute is defined, then don't skip - skip = target.split(',').none? {|name| @document.attributes.has_key? name } + # skip if no attribute is defined + skip = target.split(',', -1).none? {|name| @document.attributes.key? name } when '+' - # if any attribute is undefined, then skip - skip = target.split('+').any? {|name| !@document.attributes.has_key? name } + # skip if any attribute is undefined + skip = target.split('+', -1).any? {|name| !@document.attributes.key? name } + else + # if the attribute is undefined, then skip + skip = !@document.attributes.key?(target) end when 'ifndef' + if no_target + logger.error message_with_context %(malformed preprocessor directive - missing target: ifndef::[#{text}]), source_location: cursor + return true + end case delimiter - when nil - # if the attribute is defined, then skip - skip = @document.attributes.has_key?(target) when ',' - # if any attribute is undefined, then don't skip - skip = target.split(',').none? {|name| !@document.attributes.has_key? name } + # skip if any attribute is defined + skip = target.split(',', -1).any? {|name| @document.attributes.key? 
name } when '+' - # if any attribute is defined, then skip - skip = target.split('+').any? {|name| @document.attributes.has_key? name } + # skip if all attributes are defined + skip = target.split('+', -1).all? {|name| @document.attributes.key? name } + else + # if the attribute is defined, then skip + skip = @document.attributes.key?(target) end when 'ifeval' - # the text in brackets must match an expression - # don't honor match if it doesn't meet this criteria - if !target.empty? || !(expr_match = EvalExpressionRx.match(text.strip)) - return false - end - - lhs = resolve_expr_val expr_match[1] - rhs = resolve_expr_val expr_match[3] - - # regex enforces a restricted set of math-related operations - if (op = expr_match[2]) == '!=' - skip = lhs.send :==, rhs + if no_target + # the text in brackets must match a conditional expression + if text && EvalExpressionRx =~ text.strip + lhs = $1 + op = $2 + rhs = $3 + # regex enforces a restricted set of math-related operations (==, !=, <=, >=, <, >) + skip = ((resolve_expr_val lhs).send op, (resolve_expr_val rhs)) ? false : true + else + logger.error message_with_context %(malformed preprocessor directive - #{text ? 'invalid expression' : 'missing expression'}: ifeval::[#{text}]), source_location: cursor + return true + end else - skip = !(lhs.send op.to_sym, rhs) + logger.error message_with_context %(malformed preprocessor directive - target not permitted: ifeval::#{target}[#{text}]), source_location: cursor + return true end end end # conditional inclusion block - if directive == 'ifeval' || !text + if keyword == 'ifeval' || !text @skipping = true if skip - @conditional_stack << {:target => target, :skip => skip, :skipping => @skipping} + @conditional_stack << { target: target, skip: skip, skipping: @skipping } # single line conditional inclusion else unless @skipping || skip - # FIXME slight hack to skip past conditional line - # but keep our synthetic line marked as processed - # QUESTION can we use read_line true and unshift twice instead? - conditional_line = peek_line true replace_next_line text.rstrip - unshift conditional_line - return true + # HACK push dummy line to stand in for the opening conditional directive that's subsequently dropped + unshift '' + # NOTE force line to be processed again if it looks like an include directive + # QUESTION should we just call preprocess_include_directive here? + @look_ahead -= 1 if text.start_with? 'include::' end end true end - # Internal: Preprocess the directive (macro) to include the target document. + # Internal: Preprocess the directive to include lines from another document. # # Preprocess the directive to include the target document. The scenarios # are as follows: @@ -805,267 +1007,251 @@ # # If none of the above apply, emit the include directive line verbatim. # - # target - The name of the source document to include as specified in the - # target slot of the include::[] macro - # - # Returns a Boolean indicating whether the line under the cursor has changed. - def preprocess_include raw_target, raw_attributes - if (target = @document.sub_attributes raw_target, :attribute_missing => 'drop-line').empty? - advance - if @document.attributes.fetch('attribute-missing', Compliance.attribute_missing) == 'skip' - unshift %(Unresolved directive in #{@path} - include::#{raw_target}[#{raw_attributes}]) + # target - The unsubstituted String name of the target document to include as specified in the + # target slot of the include directive. 
+ # attrlist - An attribute list String, which is the text between the square brackets of the + # include directive. + # + # Returns a [Boolean] indicating whether the line under the cursor was changed. To skip over the + # directive, call shift and return true. + def preprocess_include_directive target, attrlist + doc = @document + if ((expanded_target = target).include? ATTR_REF_HEAD) && + (expanded_target = doc.sub_attributes target, attribute_missing: ((attr_missing = doc.attributes['attribute-missing'] || Compliance.attribute_missing) == 'warn' ? 'drop-line' : attr_missing)).empty? + if attr_missing == 'drop-line' && (doc.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? + logger.info { message_with_context %(include dropped due to missing attribute: include::#{target}[#{attrlist}]), source_location: cursor } + shift + true + elsif (doc.parse_attributes attrlist, [], sub_input: true)['optional-option'] + logger.info { message_with_context %(optional include dropped #{attr_missing == 'warn' && (doc.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? ? 'due to missing attribute' : 'because resolved target is blank'}: include::#{target}[#{attrlist}]), source_location: cursor } + shift + true + else + logger.warn message_with_context %(include dropped #{attr_missing == 'warn' && (doc.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? ? 'due to missing attribute' : 'because resolved target is blank'}: include::#{target}[#{attrlist}]), source_location: cursor + # QUESTION should this line include target or expanded_target (or escaped target?) + replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{attrlist}]) end - true - # assume that if an include processor is given, the developer wants - # to handle when and how to process the include - elsif include_processors? && - (extension = @include_processor_extensions.find {|candidate| candidate.instance.handles? target }) - advance - # FIXME parse attributes if requested by extension - extension.process_method[@document, self, target, AttributeList.new(raw_attributes).parse] + elsif include_processors? && (ext = @include_processor_extensions.find {|candidate| candidate.instance.handles? expanded_target }) + shift + # FIXME parse attributes only if requested by extension + ext.process_method[doc, self, expanded_target, (doc.parse_attributes attrlist, [], sub_input: true)] true # if running in SafeMode::SECURE or greater, don't process this directive # however, be friendly and at least make it a link to the source document - elsif @document.safe >= SafeMode::SECURE + elsif doc.safe >= SafeMode::SECURE # FIXME we don't want to use a link macro if we are in a verbatim context - replace_next_line %(link:#{target}[]) - true - elsif (abs_maxdepth = @maxdepth[:abs]) > 0 - if @include_stack.size >= abs_maxdepth - warn %(asciidoctor: ERROR: #{line_info}: maximum include depth of #{@maxdepth[:rel]} exceeded) - return false - end - if ::RUBY_ENGINE_OPAL - # NOTE resolves uri relative to currently loaded document - # NOTE we defer checking if file exists and catch the 404 error if it does not - # TODO only use this logic if env-browser is set - target_type = :file - include_file = path = if @include_stack.empty? - ::Dir.pwd == @document.base_dir ? target : (::File.join @dir, target) - else - ::File.join @dir, target - end - elsif Helpers.uriish? target - unless @document.attributes.has_key? 
'allow-uri-read' - replace_next_line %(link:#{target}[]) - return true - end + replace_next_line %(link:#{expanded_target}[]) + elsif @maxdepth + if @include_stack.size >= @maxdepth[:curr] + logger.error message_with_context %(maximum include depth of #{@maxdepth[:rel]} exceeded), source_location: cursor + return + end - target_type = :uri - include_file = path = target - if @document.attributes.has_key? 'cache-uri' - # caching requires the open-uri-cached gem to be installed - # processing will be automatically aborted if these libraries can't be opened - Helpers.require_library 'open-uri/cached', 'open-uri-cached' unless defined? ::OpenURI::Cache - elsif !::RUBY_ENGINE_OPAL - # autoload open-uri - ::OpenURI - end + parsed_attrs = doc.parse_attributes attrlist, [], sub_input: true + inc_path, target_type, relpath = resolve_include_path expanded_target, attrlist, parsed_attrs + if target_type == :file + reader = ::File.method :open + read_mode = FILE_READ_MODE + elsif target_type == :uri + reader = ::OpenURI.method :open_uri + read_mode = URI_READ_MODE else - target_type = :file - # include file is resolved relative to dir of current include, or base_dir if within original docfile - include_file = @document.normalize_system_path(target, @dir, nil, :target_name => 'include file') - unless ::File.file? include_file - warn %(asciidoctor: WARNING: #{line_info}: include file not found: #{include_file}) - replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) - return true - end - #path = @document.relative_path include_file - path = PathResolver.new.relative_path include_file, @document.base_dir + # NOTE if target_type is not set, inc_path is a boolean to skip over (false) or reevaluate (true) the current line + return inc_path end - inc_lines = nil - tags = nil - attributes = {} - if !raw_attributes.empty? - # QUESTION should we use @document.parse_attribues? - attributes = AttributeList.new(raw_attributes).parse - if attributes.has_key? 'lines' - inc_lines = [] - attributes['lines'].split(DataDelimiterRx).each do |linedef| - if linedef.include?('..') - from, to = linedef.split('..', 2).map(&:to_i) - if to == -1 - inc_lines << from - inc_lines << 1.0/0.0 - else - inc_lines.concat ::Range.new(from, to).to_a - end + inc_linenos = inc_tags = nil + if attrlist + if parsed_attrs.key? 'lines' + inc_linenos = [] + (split_delimited_value parsed_attrs['lines']).each do |linedef| + if linedef.include? '..' + from, _, to = linedef.partition '..' + inc_linenos += (to.empty? || (to = to.to_i) < 0) ? [from.to_i, 1.0/0.0] : (from.to_i..to).to_a else - inc_lines << linedef.to_i + inc_linenos << linedef.to_i end end - inc_lines = inc_lines.sort.uniq - elsif attributes.has_key? 'tag' - tags = [attributes['tag']].to_set - elsif attributes.has_key? 'tags' - tags = attributes['tags'].split(DataDelimiterRx).to_set - end - end - if inc_lines - unless inc_lines.empty? - selected = [] - inc_line_offset = 0 - inc_lineno = 0 - begin - open(include_file, 'r') do |f| - f.each_line do |l| - inc_lineno += 1 - take = inc_lines[0] - if ::Float === take && take.infinite? - selected.push l - inc_line_offset = inc_lineno if inc_line_offset == 0 - else - if f.lineno == take - selected.push l - inc_line_offset = inc_lineno if inc_line_offset == 0 - inc_lines.shift - end - break if inc_lines.empty? + inc_linenos = inc_linenos.empty? ? nil : inc_linenos.sort.uniq + elsif parsed_attrs.key? 'tag' + unless (tag = parsed_attrs['tag']).empty? || tag == '!' + inc_tags = (tag.start_with? '!') ? 
{ (tag.slice 1, tag.length) => false } : { tag => true } + end + elsif parsed_attrs.key? 'tags' + inc_tags = {} + (split_delimited_value parsed_attrs['tags']).each do |tagdef| + if tagdef.start_with? '!' + inc_tags[tagdef.slice 1, tagdef.length] = false + else + inc_tags[tagdef] = true + end unless tagdef.empty? || tagdef == '!' + end + inc_tags = nil if inc_tags.empty? + end + end + + if inc_linenos + inc_lines, inc_offset, inc_lineno = [], nil, 0 + begin + reader.call inc_path, read_mode do |f| + select_remaining = nil + f.each_line do |l| + inc_lineno += 1 + if select_remaining || (::Float === (select = inc_linenos[0]) && (select_remaining = select.infinite?)) + # NOTE record line where we started selecting + inc_offset ||= inc_lineno + inc_lines << l + else + if select == inc_lineno + # NOTE record line where we started selecting + inc_offset ||= inc_lineno + inc_lines << l + inc_linenos.shift end + break if inc_linenos.empty? end end - rescue - warn %(asciidoctor: WARNING: #{line_info}: include #{target_type} not readable: #{include_file}) - replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) - return true end - advance - # FIXME not accounting for skipped lines in reader line numbering - push_include selected, include_file, path, inc_line_offset, attributes + rescue + logger.error message_with_context %(include #{target_type} not readable: #{inc_path}), source_location: cursor + return replace_next_line %(Unresolved directive in #{@path} - include::#{expanded_target}[#{attrlist}]) end - elsif tags - unless tags.empty? - selected = [] - inc_line_offset = 0 - inc_lineno = 0 - active_tag = nil - tags_found = ::Set.new - begin - open(include_file, 'r') do |f| - f.each_line do |l| - inc_lineno += 1 - # must force encoding here since we're performing String operations on line - l.force_encoding(::Encoding::UTF_8) if FORCE_ENCODING - l = l.rstrip - # tagged lines in XML may end with '-->' - tl = l.chomp('-->').rstrip - if active_tag - if tl.end_with?(%(end::#{active_tag}[])) - active_tag = nil - else - selected.push l unless tl.end_with?('[]') && TagDirectiveRx =~ tl - inc_line_offset = inc_lineno if inc_line_offset == 0 - end - else - tags.each do |tag| - if tl.end_with?(%(tag::#{tag}[])) - active_tag = tag - tags_found << tag - break + shift + # FIXME not accounting for skipped lines in reader line numbering + if inc_offset + parsed_attrs['partial-option'] = '' + push_include inc_lines, inc_path, relpath, inc_offset, parsed_attrs + end + elsif inc_tags + inc_lines, inc_offset, inc_lineno, tag_stack, tags_used, active_tag = [], nil, 0, [], ::Set.new, nil + if inc_tags.key? '**' + if inc_tags.key? '*' + select = base_select = inc_tags.delete '**' + wildcard = inc_tags.delete '*' + else + select = base_select = wildcard = inc_tags.delete '**' + end + else + select = base_select = !(inc_tags.value? true) + wildcard = inc_tags.delete '*' + end + begin + reader.call inc_path, read_mode do |f| + dbl_co, dbl_sb = '::', '[]' + f.each_line do |l| + inc_lineno += 1 + if (l.include? dbl_co) && (l.include? dbl_sb) && TagDirectiveRx =~ l + this_tag = $2 + if $1 # end tag + if this_tag == active_tag + tag_stack.pop + active_tag, select = tag_stack.empty? ? [nil, base_select] : tag_stack[-1] + elsif inc_tags.key? this_tag + include_cursor = create_include_cursor inc_path, expanded_target, inc_lineno + if (idx = tag_stack.rindex {|key, _| key == this_tag }) + idx == 0 ? 
tag_stack.shift : (tag_stack.delete_at idx) + logger.warn message_with_context %(mismatched end tag (expected '#{active_tag}' but found '#{this_tag}') at line #{inc_lineno} of include #{target_type}: #{inc_path}), source_location: cursor, include_location: include_cursor + else + logger.warn message_with_context %(unexpected end tag '#{this_tag}' at line #{inc_lineno} of include #{target_type}: #{inc_path}), source_location: cursor, include_location: include_cursor end - end if tl.end_with?('[]') && TagDirectiveRx =~ tl + end + elsif inc_tags.key? this_tag + tags_used << this_tag + # QUESTION should we prevent tag from being selected when enclosing tag is excluded? + tag_stack << [(active_tag = this_tag), (select = inc_tags[this_tag]), inc_lineno] + elsif !wildcard.nil? + select = active_tag && !select ? false : wildcard + tag_stack << [(active_tag = this_tag), select, inc_lineno] end + elsif select + # NOTE record the line where we started selecting + inc_offset ||= inc_lineno + inc_lines << l end end - rescue - warn %(asciidoctor: WARNING: #{line_info}: include #{target_type} not readable: #{include_file}) - replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) - return true end - unless (missing_tags = tags.to_a - tags_found.to_a).empty? - warn %(asciidoctor: WARNING: #{line_info}: tag#{missing_tags.size > 1 ? 's' : nil} '#{missing_tags * ','}' not found in include #{target_type}: #{include_file}) + rescue + logger.error message_with_context %(include #{target_type} not readable: #{inc_path}), source_location: cursor + return replace_next_line %(Unresolved directive in #{@path} - include::#{expanded_target}[#{attrlist}]) + end + unless tag_stack.empty? + tag_stack.each do |tag_name, _, tag_lineno| + logger.warn message_with_context %(detected unclosed tag '#{tag_name}' starting at line #{tag_lineno} of include #{target_type}: #{inc_path}), source_location: cursor, include_location: (create_include_cursor inc_path, expanded_target, tag_lineno) end - advance + end + unless (missing_tags = inc_tags.keys - tags_used.to_a).empty? + logger.warn message_with_context %(tag#{missing_tags.size > 1 ? 's' : ''} '#{missing_tags.join ', '}' not found in include #{target_type}: #{inc_path}), source_location: cursor + end + shift + if inc_offset + parsed_attrs['partial-option'] = '' unless base_select && wildcard && inc_tags.empty? 
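# Editor's illustration (hypothetical attrlist, not part of the patch): given
#
#   include::app.rb[tags=init;!teardown]
#
# the loop above builds inc_tags = { 'init' => true, 'teardown' => false }, so base_select starts
# out false, only lines between tag::init[] and end::init[] are selected, the teardown region is
# dropped, and 'partial-option' is set because just a subset of the file was included.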
# FIXME not accounting for skipped lines in reader line numbering - push_include selected, include_file, path, inc_line_offset, attributes + push_include inc_lines, inc_path, relpath, inc_offset, parsed_attrs end else begin - # NOTE read content first so that we only advance cursor if IO operation succeeds - include_content = open(include_file, 'r') {|f| f.read } - advance - push_include include_content, include_file, path, 1, attributes + # NOTE read content before shift so cursor is only advanced if IO operation succeeds + inc_content = reader.call(inc_path, read_mode) {|f| f.read } + shift + push_include inc_content, inc_path, relpath, 1, parsed_attrs rescue - warn %(asciidoctor: WARNING: #{line_info}: include #{target_type} not readable: #{include_file}) - replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) - return true + logger.error message_with_context %(include #{target_type} not readable: #{inc_path}), source_location: cursor + return replace_next_line %(Unresolved directive in #{@path} - include::#{expanded_target}[#{attrlist}]) end end true - else - false end end - # Public: Push source onto the front of the reader and switch the context - # based on the file, document-relative path and line information given. - # - # This method is typically used in an IncludeProcessor to add source - # read from the target specified. + # Internal: Resolve the target of an include directive. # - # Examples - # - # path = 'partial.adoc' - # file = File.expand_path path - # data = IO.read file - # reader.push_include data, file, path - # - # Returns this Reader object. - def push_include data, file = nil, path = nil, lineno = 1, attributes = {} - @include_stack << [@lines, @file, @dir, @path, @lineno, @maxdepth, @process_lines] - if file - @file = file - @dir = File.dirname file - # only process lines in AsciiDoc files - @process_lines = ASCIIDOC_EXTENSIONS[::File.extname(file)] - else - @file = nil - @dir = '.' # right? - # we don't know what file type we have, so assume AsciiDoc - @process_lines = true - end - - @path = if path - @includes << Helpers.rootname(path) - path - else - '' - end - - @lineno = lineno - - if attributes.has_key? 'depth' - depth = attributes['depth'].to_i - depth = 1 if depth <= 0 - @maxdepth = {:abs => (@include_stack.size - 1) + depth, :rel => depth} - end - - # effectively fill the buffer - if (@lines = prepare_lines data, :normalize => true, :condense => false, :indent => attributes['indent']).empty? - pop_include + # An internal method to resolve the target of an include directive. This method must return an + # Array containing the resolved (absolute) path of the target, the target type (:file or :uri), + # and the path of the target relative to the outermost document. Alternately, the method may + # return a boolean to halt processing of the include directive line and to indicate whether the + # cursor should be advanced beyond this line (true) or the line should be reprocessed (false). + # + # This method is overridden in Asciidoctor.js to resolve the target of an include in the browser + # environment. + # + # target - A String containing the unresolved include target. + # (Attribute references in target value have already been resolved). + # attrlist - An attribute list String (i.e., the text between the square brackets). + # attributes - A Hash of attributes parsed from attrlist. 
+ # + # Returns An Array containing the resolved (absolute) include path, the target type, and the path + # relative to the outermost document. May also return a boolean to halt processing of the include. + def resolve_include_path target, attrlist, attributes + doc = @document + if (Helpers.uriish? target) || (::String === @dir ? nil : (target = %(#{@dir}/#{target}))) + return replace_next_line %(link:#{target}[#{attrlist}]) unless doc.attr? 'allow-uri-read' + if doc.attr? 'cache-uri' + # caching requires the open-uri-cached gem to be installed + # processing will be automatically aborted if these libraries can't be opened + Helpers.require_library 'open-uri/cached', 'open-uri-cached' unless defined? ::OpenURI::Cache + elsif !RUBY_ENGINE_OPAL + # autoload open-uri + ::OpenURI + end + [(::URI.parse target), :uri, target] else - # FIXME we eventually want to handle leveloffset without affecting the lines - if attributes.has_key? 'leveloffset' - @lines.unshift '' - @lines.unshift %(:leveloffset: #{attributes['leveloffset']}) - @lines.push '' - if (old_leveloffset = @document.attr 'leveloffset') - @lines.push %(:leveloffset: #{old_leveloffset}) + # include file is resolved relative to dir of current include, or base_dir if within original docfile + inc_path = doc.normalize_system_path target, @dir, nil, target_name: 'include file' + unless ::File.file? inc_path + if attributes['optional-option'] + logger.info { message_with_context %(optional include dropped because include file not found: #{inc_path}), source_location: cursor } + shift + return true else - @lines.push ':leveloffset!:' + logger.error message_with_context %(include file not found: #{inc_path}), source_location: cursor + return replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{attrlist}]) end - # compensate for these extra lines - @lineno -= 2 end - - # FIXME kind of a hack - #Document::AttributeEntry.new('infile', @file).save_to_next_block @document - #Document::AttributeEntry.new('indir', @dir).save_to_next_block @document - @eof = false - @look_ahead = 0 + # NOTE relpath is the path relative to the root document (or base_dir, if set) + # QUESTION should we move relative_path method to Document + relpath = doc.path_resolver.relative_path inc_path, doc.base_dir + [inc_path, :file, relpath] end - self end def pop_include @@ -1074,46 +1260,26 @@ # FIXME kind of a hack #Document::AttributeEntry.new('infile', @file).save_to_next_block @document #Document::AttributeEntry.new('indir', ::File.dirname(@file)).save_to_next_block @document - @eof = @lines.empty? @look_ahead = 0 - end - nil - end - - def include_depth - @include_stack.size - end - - def exceeded_max_depth? - if (abs_maxdepth = @maxdepth[:abs]) > 0 && @include_stack.size >= abs_maxdepth - @maxdepth[:rel] - else - false + nil end end - # TODO Document this override - # also, we now have the field in the super class, so perhaps - # just implement the logic there? - def shift - if @unescape_next_line - @unescape_next_line = false - super[1..-1] - else - super - end + # Private: Split delimited value on comma (if found), otherwise semi-colon + def split_delimited_value val + (val.include? ',') ? (val.split ',') : (val.split ';') end # Private: Ignore front-matter, commonly used in static site generators def skip_front_matter! 
data, increment_linenos = true front_matter = nil if data[0] == '---' - original_data = data.dup - front_matter = [] + original_data = data.drop 0 data.shift + front_matter = [] @lineno += 1 if increment_linenos while !data.empty? && data[0] != '---' - front_matter.push data.shift + front_matter << data.shift @lineno += 1 if increment_linenos end @@ -1164,52 +1330,32 @@ if ((val.start_with? '"') && (val.end_with? '"')) || ((val.start_with? '\'') && (val.end_with? '\'')) quoted = true - val = val[1...-1] + val = val.slice 1, (val.length - 1) else quoted = false end # QUESTION should we substitute first? # QUESTION should we also require string to be single quoted (like block attribute values?) - if val.include? '{' - val = @document.sub_attributes val, :attribute_missing => 'drop' - end + val = @document.sub_attributes val, attribute_missing: 'drop' if val.include? ATTR_REF_HEAD if quoted val + elsif val.empty? + nil + elsif val == 'true' + true + elsif val == 'false' + false + elsif val.rstrip.empty? + ' ' + elsif val.include? '.' + val.to_f else - if val.empty? - nil - elsif val == 'true' - true - elsif val == 'false' - false - elsif val.rstrip.empty? - ' ' - elsif val.include? '.' - val.to_f - else - # fallback to coercing to integer, since we - # require string values to be explicitly quoted - val.to_i - end - end - end - - def include_processors? - if @include_processor_extensions.nil? - if @document.extensions? && @document.extensions.include_processors? - !!(@include_processor_extensions = @document.extensions.include_processors) - else - @include_processor_extensions = false - end - else - @include_processor_extensions != false + # fallback to coercing to integer, since we + # require string values to be explicitly quoted + val.to_i end end - - def to_s - %(#<#{self.class}@#{object_id} {path: #{@path.inspect}, line #: #{@lineno}, include depth: #{@include_stack.size}, include stack: [#{@include_stack.map {|inc| inc.to_s}.join ', '}]}>) - end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/rouge_ext.rb asciidoctor-2.0.10/lib/asciidoctor/rouge_ext.rb --- asciidoctor-1.5.5/lib/asciidoctor/rouge_ext.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/rouge_ext.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,39 @@ +# frozen_string_literal: true +require 'rouge' unless defined? Rouge.version + +module Asciidoctor; module RougeExt; module Formatters + class HTMLTable < ::Rouge::Formatter + def initialize delegate, opts + @delegate = delegate + @start_line = opts[:start_line] || 1 + end + + def stream tokens + formatted_code = @delegate.format tokens + formatted_code += LF unless formatted_code.end_with? LF, HangingEndSpanTagCs + last_lineno = (first_lineno = @start_line) + (formatted_code.count LF) - 1 # assume number of newlines is constant + lineno_format = %(%#{(::Math.log10 last_lineno).floor + 1}i) + formatted_linenos = ((first_lineno..last_lineno).map {|lineno| sprintf lineno_format, lineno } << '').join LF + yield %(
<table class="linenotable"><tbody><tr><td class="linenos gl"><pre class="lineno">#{formatted_linenos}</pre></td><td class="code"><pre>#{formatted_code}</pre></td></tr></tbody></table>) + end + end + + class HTMLLineHighlighter < ::Rouge::Formatter + def initialize delegate, opts + @delegate = delegate + @lines = opts[:lines] || [] + end + + def stream tokens + lineno = 0 + token_lines tokens do |tokens_in_line| + yield (@lines.include? lineno += 1) ? %(<span class="hll">#{@delegate.format tokens_in_line}#{LF}</span>) : %(#{@delegate.format tokens_in_line}#{LF}) + end + end + end + + LF = ?\n + HangingEndSpanTagCs = %(#{LF}</span>
    ) + + private_constant :HangingEndSpanTagCs, :LF +end; end; end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/rx.rb asciidoctor-2.0.10/lib/asciidoctor/rx.rb --- asciidoctor-1.5.5/lib/asciidoctor/rx.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/rx.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,721 @@ +module Asciidoctor + # A collection of regular expression constants used by the parser. (For speed, these are not defined in the Rx module, + # but rather directly in the Asciidoctor module). + # + # NOTE The following pattern, which appears frequently, captures the contents between square brackets, ignoring + # escaped closing brackets (closing brackets prefixed with a backslash '\' character) + # + # Pattern: \[(|#{CC_ALL}*?[^\\])\] + # Matches: [enclosed text] and [enclosed [text\]], not [enclosed text \\] or [\\] (as these require a trailing space) + module Rx; end + + ## Document header + + # Matches the author info line immediately following the document title. + # + # Examples + # + # Doc Writer + # Mary_Sue Brontë + # + AuthorInfoLineRx = /^(#{CG_WORD}[#{CC_WORD}\-'.]*)(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +<([^>]+)>)?$/ + + # Matches the delimiter that separates multiple authors. + # + # Examples + # + # Doc Writer; Junior Writer + # + AuthorDelimiterRx = /;(?: |$)/ + + # Matches the revision info line, which appears immediately following + # the author info line beneath the document title. + # + # Examples + # + # v1.0 + # 2013-01-01 + # v1.0, 2013-01-01: Ring in the new year release + # 1.0, Jan 01, 2013 + # + RevisionInfoLineRx = /^(?:[^\d{]*(#{CC_ANY}*?),)? *(?!:)(#{CC_ANY}*?)(?: *(?!^),?: *(#{CC_ANY}*))?$/ + + # Matches the title and volnum in the manpage doctype. + # + # Examples + # + # = asciidoctor(1) + # = asciidoctor ( 1 ) + # + ManpageTitleVolnumRx = /^(#{CC_ANY}+?) *\( *(#{CC_ANY}+?) *\)$/ + + # Matches the name and purpose in the manpage doctype. + # + # Examples + # + # asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats + # + ManpageNamePurposeRx = /^(#{CC_ANY}+?) +- +(#{CC_ANY}+)$/ + + ## Preprocessor directives + + # Matches a conditional preprocessor directive (e.g., ifdef, ifndef, ifeval and endif). + # + # Examples + # + # ifdef::basebackend-html[] + # ifndef::theme[] + # ifeval::["{asciidoctor-version}" >= "0.1.0"] + # ifdef::asciidoctor[Asciidoctor!] + # endif::theme[] + # endif::basebackend-html[] + # endif::[] + # + ConditionalDirectiveRx = /^(\\)?(ifdef|ifndef|ifeval|endif)::(\S*?(?:([,+])\S*?)?)\[(#{CC_ANY}+)?\]$/ + + # Matches a restricted (read as safe) eval expression. + # + # Examples + # + # "{asciidoctor-version}" >= "0.1.0" + # + EvalExpressionRx = /^(#{CC_ANY}+?) *([=!><]=|[><]) *(#{CC_ANY}+)$/ + + # Matches an include preprocessor directive. + # + # Examples + # + # include::chapter1.ad[] + # include::example.txt[lines=1;2;5..10] + # + IncludeDirectiveRx = /^(\\)?include::([^\[][^\[]*)\[(#{CC_ANY}+)?\]$/ + + # Matches a trailing tag directive in an include file. + # + # Examples + # + # // tag::try-catch[] + # try { + # someMethod(); + # catch (Exception e) { + # log(e); + # } + # // end::try-catch[] + # NOTE m flag is required for Asciidoctor.js + TagDirectiveRx = /\b(?:tag|(e)nd)::(\S+?)\[\](?=$|[ \r])/m + + ## Attribute entries and references + + # Matches a document attribute entry. 
+ # + # Examples + # + # :foo: bar + # :First Name: Dan + # :sectnums!: + # :!toc: + # :long-entry: Attribute value lines ending in ' \' \ + # are joined together as a single value, \ + # collapsing the line breaks and indentation to \ + # a single space. + # + AttributeEntryRx = /^:(!?#{CG_WORD}[^:]*):(?:[ \t]+(#{CC_ANY}*))?$/ + + # Matches invalid characters in an attribute name. + InvalidAttributeNameCharsRx = /[^#{CC_WORD}-]/ + + # Matches a pass inline macro that surrounds the value of an attribute + # entry once it has been parsed. + # + # Examples + # + # pass:[text] + # pass:a[{a} {b} {c}] + # + if RUBY_ENGINE == 'opal' + # NOTE In JavaScript, ^ and $ match the boundaries of the string when the m flag is not set + AttributeEntryPassMacroRx = /^pass:([a-z]+(?:,[a-z-]+)*)?\[(#{CC_ALL}*)\]$/ + else + AttributeEntryPassMacroRx = /\Apass:([a-z]+(?:,[a-z-]+)*)?\[(.*)\]\Z/m + end + + # Matches an inline attribute reference. + # + # Examples + # + # {foobar} or {app_name} or {product-version} + # {counter:sequence-name:1} + # {set:foo:bar} + # {set:name!} + # + AttributeReferenceRx = /(\\)?\{(#{CG_WORD}[#{CC_WORD}-]*|(set|counter2?):#{CC_ANY}+?)(\\)?\}/ + + ## Paragraphs and delimited blocks + + # Matches an anchor (i.e., id + optional reference text) on a line above a block. + # + # Examples + # + # [[idname]] + # [[idname,Reference Text]] + # + BlockAnchorRx = /^\[\[(?:|([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+))?)\]\]$/ + + # Matches an attribute list above a block element. + # + # Examples + # + # # strictly positional + # [quote, Adam Smith, Wealth of Nations] + # + # # name/value pairs + # [NOTE, caption="Good to know"] + # + # # as attribute reference + # [{lead}] + # + BlockAttributeListRx = /^\[(|[#{CC_WORD}.#%{,"']#{CC_ANY}*)\]$/ + + # A combined pattern that matches either a block anchor or a block attribute list. + # + # TODO this one gets hit a lot, should be optimized as much as possible + BlockAttributeLineRx = /^\[(?:|[#{CC_WORD}.#%{,"']#{CC_ANY}*|\[(?:|[#{CC_ALPHA}_:][#{CC_WORD}\-:.]*(?:, *#{CC_ANY}+)?)\])\]$/ + + # Matches a title above a block. + # + # Examples + # + # .Title goes here + # + BlockTitleRx = /^\.(\.?[^ \t.]#{CC_ANY}*)$/ + + # Matches an admonition label at the start of a paragraph. + # + # Examples + # + # NOTE: Just a little note. + # TIP: Don't forget! + # + AdmonitionParagraphRx = /^(#{ADMONITION_STYLES.to_a.join '|'}):[ \t]+/ + + # Matches a literal paragraph, which is a line of text preceded by at least one space. + # + # Examples + # + # Foo + # Foo + LiteralParagraphRx = /^([ \t]+#{CC_ANY}*)$/ + + # Matches a comment block. + # + # Examples + # + # //// + # This is a block comment. + # It can span one or more lines. + # //// + #CommentBlockRx = %r(^/{4,}$) + + # Matches a comment line. + # + # Examples + # + # // note to author + # + #CommentLineRx = %r(^//(?=[^/]|$)) + + ## Section titles + + # Matches an Atx (single-line) section title. + # + # Examples + # + # == Foo + # // ^ a level 1 (h2) section title + # + # == Foo == + # // ^ also a level 1 (h2) section title + # + AtxSectionTitleRx = /^(=={0,5})[ \t]+(#{CC_ANY}+?)(?:[ \t]+\1)?$/ + + # Matches an extended Atx section title that includes support for the Markdown variant. + ExtAtxSectionTitleRx = /^(=={0,5}|#\#{0,5})[ \t]+(#{CC_ANY}+?)(?:[ \t]+\1)?$/ + + # Matches the title only (first line) of an Setext (two-line) section title. + # The title cannot begin with a dot and must have at least one alphanumeric character. 
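# Examples (editor's illustration, not part of the patch)
#
#   Section Title    (matches; contains alphanumeric characters)
#   .Block Title     (no match; begins with a dot)
#   ----             (no match; no alphanumeric character)
#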
+ SetextSectionTitleRx = /^((?!\.)#{CC_ANY}*?#{CG_ALNUM}#{CC_ANY}*)$/ + + # Matches an anchor (i.e., id + optional reference text) inside a section title. + # + # Examples + # + # Section Title [[idname]] + # Section Title [[idname,Reference Text]] + # + InlineSectionAnchorRx = / (\\)?\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+))?\]\]$/ + + # Matches invalid ID characters in a section title. + # + # NOTE uppercase chars not included since expression is only run on a lowercase string + InvalidSectionIdCharsRx = /<[^>]+>|&(?:[a-z][a-z]+\d{0,2}|#\d\d\d{0,4}|#x[\da-f][\da-f][\da-f]{0,3});|[^ #{CC_WORD}\-.]+?/ + + # Matches an explicit section level style like sect1 + # + SectionLevelStyleRx = /^sect\d$/ + + ## Lists + + # Detects the start of any list item. + # + # NOTE we only have to check as far as the blank character because we know it means non-whitespace follows. + # IMPORTANT if this regexp does not agree with the regexp for each list type, the parser will hang. + AnyListRx = %r(^(?:[ \t]*(?:-|\*\**|\.\.*|\u2022|\d+\.|[a-zA-Z]\.|[IVXivx]+\))[ \t]|(?!//[^/])[ \t]*[^ \t]#{CC_ANY}*?(?::::{0,2}|;;)(?:$|[ \t])|[ \t])) + + # Matches an unordered list item (one level for hyphens, up to 5 levels for asterisks). + # + # Examples + # + # * Foo + # - Foo + # + # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces + UnorderedListRx = /^[ \t]*(-|\*\**|\u2022)[ \t]+(#{CC_ANY}*)$/ + + # Matches an ordered list item (explicit numbering or up to 5 consecutive dots). + # + # Examples + # + # . Foo + # .. Foo + # 1. Foo (arabic, default) + # a. Foo (loweralpha) + # A. Foo (upperalpha) + # i. Foo (lowerroman) + # I. Foo (upperroman) + # + # NOTE leading space match is not always necessary, but is used for list reader + # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces + OrderedListRx = /^[ \t]*(\.\.*|\d+\.|[a-zA-Z]\.|[IVXivx]+\))[ \t]+(#{CC_ANY}*)$/ + + # Matches the ordinals for each type of ordered list. + OrderedListMarkerRxMap = { + arabic: /\d+\./, + loweralpha: /[a-z]\./, + lowerroman: /[ivx]+\)/, + upperalpha: /[A-Z]\./, + upperroman: /[IVX]+\)/, + #lowergreek: /[a-z]\]/, + } + + # Matches a description list entry. + # + # Examples + # + # foo:: + # bar::: + # baz:::: + # blah;; + # + # # the term may be followed by a description on the same line... + # + # foo:: The metasyntactic variable that commonly accompanies 'bar' (see also, <>). + # + # # ...or on a separate line, which may optionally be indented + # + # foo:: + # The metasyntactic variable that commonly accompanies 'bar' (see also, <>). + # + # # attribute references may be used in both the term and the description + # + # {foo-term}:: {foo-desc} + # + # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces + # NOTE must skip line comment when looking for next list item inside list + DescriptionListRx = %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?)(:::{0,2}|;;)(?:$|[ \t]+(#{CC_ANY}*)$)) + + # Matches a sibling description list item (excluding the delimiter specified by the key). 
+ # NOTE must skip line comment when looking for sibling list item + DescriptionListSiblingRx = { + '::' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?[^:]|[^ \t:])(::)(?:$|[ \t]+(#{CC_ANY}*)$)), + ':::' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?[^:]|[^ \t:])(:::)(?:$|[ \t]+(#{CC_ANY}*)$)), + '::::' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?[^:]|[^ \t:])(::::)(?:$|[ \t]+(#{CC_ANY}*)$)), + ';;' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?)(;;)(?:$|[ \t]+(#{CC_ANY}*)$)) + } + + # Matches a callout list item. + # + # Examples + # + # <1> Explanation + # + # or + # + # <.> Explanation with automatic number + # + # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces + CalloutListRx = /^<(\d+|\.)>[ \t]+(#{CC_ANY}*)$/ + + # Matches a callout reference inside literal text. + # + # Examples + # <1> (optionally prefixed by //, #, -- or ;; line comment chars) + # <1> <2> (multiple callouts on one line) + # (for XML-based languages) + # <.> (auto-numbered) + # + # NOTE extract regexps are applied line-by-line, so we can use $ as end-of-line char + CalloutExtractRx = %r(((?://|#|--|;;) ?)?(\\)?(?=(?: ?\\?)*$)) + CalloutExtractRxt = '(\\\\)?<()(\\d+|\\.)>(?=(?: ?\\\\?<(?:\\d+|\\.)>)*$)' + CalloutExtractRxMap = ::Hash.new {|h, k| h[k] = /(#{k.empty? ? '' : "#{::Regexp.escape k} ?"})?#{CalloutExtractRxt}/ } + # NOTE special characters have not been replaced when scanning + CalloutScanRx = /\\?(?=(?: ?\\?)*#{CC_EOL})/ + # NOTE special characters have already been replaced when converting to an SGML format + CalloutSourceRx = %r(((?://|#|--|;;) ?)?(\\)?<!?(|--)(\d+|\.)\3>(?=(?: ?\\?<!?\3(?:\d+|\.)\3>)*#{CC_EOL})) + CalloutSourceRxt = "(\\\\)?<()(\\d+|\\.)>(?=(?: ?\\\\?<(?:\\d+|\\.)>)*#{CC_EOL})" + CalloutSourceRxMap = ::Hash.new {|h, k| h[k] = /(#{k.empty? ? '' : "#{::Regexp.escape k} ?"})?#{CalloutSourceRxt}/ } + + # A Hash of regexps for lists used for dynamic access. + ListRxMap = { ulist: UnorderedListRx, olist: OrderedListRx, dlist: DescriptionListRx, colist: CalloutListRx } + + ## Tables + + # Parses the column spec (i.e., colspec) for a table. + # + # Examples + # + # 1*h,2*,^3e + # + ColumnSpecRx = /^(?:(\d+)\*)?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?(\d+%?|~)?([a-z])?$/ + + # Parses the start and end of a cell spec (i.e., cellspec) for a table. + # + # Examples + # + # 2.3+<.>m + # + # FIXME use step-wise scan (or treetop) rather than this mega-regexp + CellSpecStartRx = /^[ \t]*(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ + CellSpecEndRx = /[ \t]+(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ + + # Block macros + + # Matches the custom block macro pattern. + # + # Examples + # + # gist::123456[] + # + #-- + # NOTE we've relaxed the match for target to accomodate the short format (e.g., name::[attrlist]) + CustomBlockMacroRx = /^(#{CG_WORD}[#{CC_WORD}-]*)::(|\S|\S#{CC_ANY}*?\S)\[(#{CC_ANY}+)?\]$/ + + # Matches an image, video or audio block macro. + # + # Examples + # + # image::filename.png[Caption] + # video::http://youtube.com/12345[Cats vs Dogs] + # + BlockMediaMacroRx = /^(image|video|audio)::(\S|\S#{CC_ANY}*?\S)\[(#{CC_ANY}+)?\]$/ + + # Matches the TOC block macro. + # + # Examples + # + # toc::[] + # toc::[levels=2] + # + BlockTocMacroRx = /^toc::\[(#{CC_ANY}+)?\]$/ + + ## Inline macros + + # Matches an anchor (i.e., id + optional reference text) in the flow of text. 
+ # + # Examples + # + # [[idname]] + # [[idname,Reference Text]] + # anchor:idname[] + # anchor:idname[Reference Text] + # + InlineAnchorRx = /(\\)?(?:\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]|anchor:([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)\[(?:\]|(#{CC_ANY}*?[^\\])\]))/ + + # Scans for a non-escaped anchor (i.e., id + optional reference text) in the flow of text. + InlineAnchorScanRx = /(?:^|[^\\\[])\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]|(?:^|[^\\])anchor:([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)\[(?:\]|(#{CC_ANY}*?[^\\])\])/ + + # Scans for a leading, non-escaped anchor (i.e., id + optional reference text). + LeadingInlineAnchorRx = /^\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]/ + + # Matches a bibliography anchor at the start of the list item text (in a bibliography list). + # + # Examples + # + # [[[Fowler_1997]]] Fowler M. ... + # + InlineBiblioAnchorRx = /^\[\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]\]/ + + # Matches an inline e-mail address. + # + # doc.writer@example.com + # + InlineEmailRx = %r(([\\>:/])?#{CG_WORD}(?:&|[#{CC_WORD}\-.%+])*@#{CG_ALNUM}[#{CC_ALNUM}_\-.]*\.[a-zA-Z]{2,5}\b) + + # Matches an inline footnote macro, which is allowed to span multiple lines. + # + # Examples + # footnote:[text] (not referenceable) + # footnote:id[text] (referenceable) + # footnote:id[] (reference) + # footnoteref:[id,text] (legacy) + # footnoteref:[id] (legacy) + # + InlineFootnoteMacroRx = /\\?footnote(?:(ref):|:([#{CC_WORD}-]+)?)\[(?:|(#{CC_ALL}*?[^\\]))\]/m + + # Matches an image or icon inline macro. + # + # Examples + # + # image:filename.png[Alt Text] + # image:http://example.com/images/filename.png[Alt Text] + # image:filename.png[More [Alt\] Text] (alt text becomes "More [Alt] Text") + # icon:github[large] + # + # NOTE be as non-greedy as possible by not allowing newline or left square bracket in target + InlineImageMacroRx = /\\?i(?:mage|con):([^:\s\[](?:[^\n\[]*[^\s\[])?)\[(|#{CC_ALL}*?[^\\])\]/m + + # Matches an indexterm inline macro, which may span multiple lines. + # + # Examples + # + # indexterm:[Tigers,Big cats] + # (((Tigers,Big cats))) + # indexterm2:[Tigers] + # ((Tigers)) + # + InlineIndextermMacroRx = /\\?(?:(indexterm2?):\[(#{CC_ALL}*?[^\\])\]|\(\((#{CC_ALL}+?)\)\)(?!\)))/m + + # Matches either the kbd or btn inline macro. + # + # Examples + # + # kbd:[F3] + # kbd:[Ctrl+Shift+T] + # kbd:[Ctrl+\]] + # kbd:[Ctrl,T] + # btn:[Save] + # + InlineKbdBtnMacroRx = /(\\)?(kbd|btn):\[(#{CC_ALL}*?[^\\])\]/m + + # Matches an implicit link and some of the link inline macro. + # + # Examples + # + # https://github.com + # https://github.com[GitHub] + # + # link:https://github.com[] + # + # FIXME revisit! the main issue is we need different rules for implicit vs explicit + InlineLinkRx = %r((^|link:|#{CG_BLANK}|<|[>\(\)\[\];])(\\?(?:https?|file|ftp|irc)://[^\s\[\]<]*([^\s.,\[\]<]))(?:\[(|#{CC_ALL}*?[^\\])\])?)m + + # Match a link or e-mail inline macro. + # + # Examples + # + # link:path[label] + # mailto:doc.writer@example.com[] + # + # NOTE be as non-greedy as possible by not allowing space or left square bracket in target + InlineLinkMacroRx = /\\?(?:link|(mailto)):(|[^:\s\[][^\s\[]*)\[(|#{CC_ALL}*?[^\\])\]/m + + # Matches the name of a macro. + # + MacroNameRx = /^#{CG_WORD}[#{CC_WORD}-]*$/ + + # Matches a stem (and alternatives, asciimath and latexmath) inline macro, which may span multiple lines. 
+ # + # Examples + # + # stem:[x != 0] + # asciimath:[x != 0] + # latexmath:[\sqrt{4} = 2] + # + InlineStemMacroRx = /\\?(stem|(?:latex|ascii)math):([a-z]+(?:,[a-z-]+)*)?\[(#{CC_ALL}*?[^\\])\]/m + + # Matches a menu inline macro. + # + # Examples + # + # menu:File[Save As...] + # menu:Edit[] + # menu:View[Page Style > No Style] + # menu:View[Page Style, No Style] + # + InlineMenuMacroRx = /\\?menu:(#{CG_WORD}|[#{CC_WORD}&][^\n\[]*[^\s\[])\[ *(?:|(#{CC_ALL}*?[^\\]))?\]/m + + # Matches an implicit menu inline macro. + # + # Examples + # + # "File > New..." + # + InlineMenuRx = /\\?"([#{CC_WORD}&][^"]*?[ \n]+>[ \n]+[^"]*)"/ + + # Matches an inline passthrough, which may span multiple lines. + # + # Examples + # + # +text+ + # `text` (compat) + # + # NOTE we always capture the attributes so we know when to use compatible (i.e., legacy) behavior + InlinePassRx = { + false => ['+', '`', /(^|[^#{CC_WORD};:])(?:\[([^\]]+)\])?(\\?(\+|`)(\S|\S#{CC_ALL}*?\S)\4)(?!#{CG_WORD})/m], + true => ['`', nil, /(^|[^`#{CC_WORD}])(?:\[([^\]]+)\])?(\\?(`)([^`\s]|[^`\s]#{CC_ALL}*?\S)\4)(?![`#{CC_WORD}])/m] + } + + # Matches an inline plus passthrough spanning multiple lines, but only when it occurs directly + # inside constrained monospaced formatting in non-compat mode. + # + # Examples + # + # +text+ + # + SinglePlusInlinePassRx = /^(\\)?\+(\S|\S#{CC_ALL}*?\S)\+$/m + + # Matches several variants of the passthrough inline macro, which may span multiple lines. + # + # Examples + # + # +++text+++ + # $$text$$ + # pass:quotes[text] + # + # NOTE we have to support an empty pass:[] for compatibility with AsciiDoc Python + InlinePassMacroRx = /(?:(?:(\\?)\[([^\]]+)\])?(\\{0,2})(\+\+\+?|\$\$)(#{CC_ALL}*?)\4|(\\?)pass:([a-z]+(?:,[a-z-]+)*)?\[(|#{CC_ALL}*?[^\\])\])/m + + # Matches an xref (i.e., cross-reference) inline macro, which may span multiple lines. + # + # Examples + # + # <> + # xref:id[reftext] + # + # NOTE special characters have already been escaped, hence the entity references + # NOTE { is included in start characters to support target that begins with attribute reference in title content + InlineXrefMacroRx = %r(\\?(?:<<([#{CC_WORD}#/.:{]#{CC_ALL}*?)>>|xref:([#{CC_WORD}#/.:{]#{CC_ALL}*?)\[(?:\]|(#{CC_ALL}*?[^\\])\])))m + + ## Layout + + # Matches a trailing + preceded by at least one space character, + # which forces a hard line break (
    tag in HTML output). + # + # NOTE AsciiDoc Python allows + to be preceded by TAB; Asciidoctor does not + # + # Examples + # + # Humpty Dumpty sat on a wall, + + # Humpty Dumpty had a great fall. + # + if RUBY_ENGINE == 'opal' + # NOTE In JavaScript, ^ and $ only match the start and end of line if the multiline flag is present + HardLineBreakRx = /^(#{CC_ANY}*) \+$/m + else + # NOTE In Ruby, ^ and $ always match start and end of line + HardLineBreakRx = /^(.*) \+$/ + end + + # Matches a Markdown horizontal rule. + # + # Examples + # + # --- or - - - + # *** or * * * + # ___ or _ _ _ + # + MarkdownThematicBreakRx = /^ {0,3}([-*_])( *)\1\2\1$/ + + # Matches an AsciiDoc or Markdown horizontal rule or AsciiDoc page break. + # + # Examples + # + # ''' (horizontal rule) + # <<< (page break) + # --- or - - - (horizontal rule, Markdown) + # *** or * * * (horizontal rule, Markdown) + # ___ or _ _ _ (horizontal rule, Markdown) + # + ExtLayoutBreakRx = /^(?:'{3,}|<{3,}|([-*_])( *)\1\2\1)$/ + + ## General + + # Matches consecutive blank lines. + # + # Examples + # + # one + # + # two + # + BlankLineRx = /\n{2,}/ + + # Matches a comma or semi-colon delimiter. + # + # Examples + # + # one,two + # three;four + # + #DataDelimiterRx = /[,;]/ + + # Matches whitespace (space, tab, newline) escaped by a backslash. + # + # Examples + # + # three\ blind\ mice + # + EscapedSpaceRx = /\\([ \t\n])/ + + # Detects if text is a possible candidate for the replacements substitution. + # + ReplaceableTextRx = /[&']|--|\.\.\.|\([CRT]M?\)/ + + # Matches a whitespace delimiter, a sequence of spaces, tabs, and/or newlines. + # Matches the parsing rules of %w strings in Ruby. + # + # Examples + # + # one two three four + # five six + # + # TODO change to /(?]+>/ +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/section.rb asciidoctor-2.0.10/lib/asciidoctor/section.rb --- asciidoctor-1.5.5/lib/asciidoctor/section.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/section.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Methods for managing sections of AsciiDoc content in a document. # The section responds as an Array of content blocks by delegating @@ -24,92 +24,54 @@ # Public: Get/Set the 0-based index order of this section within the parent block attr_accessor :index - # Public: Get/Set the number of this section within the parent block - # Only relevant if the attribute numbered is true - attr_accessor :number - # Public: Get/Set the section name of this section attr_accessor :sectname # Public: Get/Set the flag to indicate whether this is a special section or a child of one attr_accessor :special - # Public: Get the state of the numbered attribute at this section (need to preserve for creating TOC) + # Public: Get/Set the flag to indicate whether this section should be numbered. + # The sectnum method should only be called if this flag is true. attr_accessor :numbered + # Public: Get the caption for this section (only relevant for appendices) + attr_reader :caption + # Public: Initialize an Asciidoctor::Section object. # - # parent - The parent Asciidoc Object. - def initialize parent = nil, level = nil, numbered = true, opts = {} + # parent - The parent AbstractBlock. 
If set, must be a Document or Section object (default: nil) + # level - The Integer level of this section (default: 1 more than parent level or 1 if parent not defined) + # numbered - A Boolean indicating whether numbering is enabled for this Section (default: false) + # opts - An optional Hash of options (default: {}) + def initialize parent = nil, level = nil, numbered = false, opts = {} super parent, :section, opts - if level - @level = level + if Section === parent + @level, @special = level || (parent.level + 1), parent.special else - @level = parent ? (parent.level + 1) : 1 + @level, @special = level || 1, false end - @numbered = numbered && @level > 0 - @special = parent && parent.context == :section && parent.special + @numbered = numbered @index = 0 - @number = 1 end # Public: The name of this section, an alias of the section title - alias :name :title + alias name title - # Public: Generate a String id for this section. - # - # The generated id is prefixed with value of the 'idprefix' attribute, which - # is an underscore by default. - # - # Section id synthesis can be disabled by undefining the 'sectids' attribute. - # - # If the generated id is already in use in the document, a count is appended - # until a unique id is found. + # Public: Generate a String ID from the title of this section. # - # Examples - # - # section = Section.new(parent) - # section.title = "Foo" - # section.generate_id - # => "_foo" - # - # another_section = Section.new(parent) - # another_section.title = "Foo" - # another_section.generate_id - # => "_foo_1" - # - # yet_another_section = Section.new(parent) - # yet_another_section.title = "Ben & Jerry" - # yet_another_section.generate_id - # => "_ben_jerry" + # See Section.generate_id for details. def generate_id - if @document.attributes.has_key? 'sectids' - sep = @document.attributes['idseparator'] || '_' - pre = @document.attributes['idprefix'] || '_' - base_id = %(#{pre}#{title.downcase.gsub(InvalidSectionIdCharsRx, sep).tr_s(sep, sep).chomp(sep)}) - # ensure id doesn't begin with idseparator if idprefix is empty and idseparator is not empty - if pre.empty? && !sep.empty? && base_id.start_with?(sep) - base_id = base_id[1..-1] - base_id = base_id[1..-1] while base_id.start_with?(sep) - end - gen_id = base_id - cnt = Compliance.unique_id_start_index - while @document.references[:ids].has_key? gen_id - gen_id = %(#{base_id}#{sep}#{cnt}) - cnt += 1 - end - gen_id - else - nil - end + Section.generate_id title, @document end # Public: Get the section number for the current Section # - # The section number is a unique, dot separated String - # where each entry represents one level of nesting and - # the value of each entry is the 1-based outline number - # of the Section amongst its numbered sibling Sections + # The section number is a dot-separated String that uniquely describes the position of this + # Section in the document. Each entry represents a level of nesting. The value of each entry is + # the 1-based outline number of the Section amongst its numbered sibling Sections. + # + # This method assumes that both the @level and @parent instance variables have been assigned. + # The method also assumes that the value of @parent is either a Document or Section. # # delimiter - the delimiter to separate the number for each level # append - the String to append at the end of the section number @@ -149,10 +111,41 @@ # Returns the section number as a String def sectnum(delimiter = '.', append = nil) append ||= (append == false ? 
'' : delimiter) - if @level && @level > 1 && @parent && @parent.context == :section - %(#{@parent.sectnum(delimiter)}#{@number}#{append}) + @level > 1 && Section === @parent ? %(#{@parent.sectnum(delimiter, delimiter)}#{@numeral}#{append}) : %(#{@numeral}#{append}) + end + + # (see AbstractBlock#xreftext) + def xreftext xrefstyle = nil + if (val = reftext) && !val.empty? + val + elsif xrefstyle + if @numbered + case xrefstyle + when 'full' + if (type = @sectname) == 'chapter' || type == 'appendix' + quoted_title = sub_placeholder (sub_quotes '_%s_'), title + else + quoted_title = sub_placeholder (sub_quotes @document.compat_mode ? %q(``%s'') : '"`%s`"'), title + end + if (signifier = @document.attributes[%(#{type}-refsig)]) + %(#{signifier} #{sectnum '.', ','} #{quoted_title}) + else + %(#{sectnum '.', ','} #{quoted_title}) + end + when 'short' + if (signifier = @document.attributes[%(#{@sectname}-refsig)]) + %(#{signifier} #{sectnum '.', ''}) + else + sectnum '.', '' + end + else # 'basic' + (type = @sectname) == 'chapter' || type == 'appendix' ? (sub_placeholder (sub_quotes '_%s_'), title) : title + end + else # apply basic styling + (type = @sectname) == 'chapter' || type == 'appendix' ? (sub_placeholder (sub_quotes '_%s_'), title) : title + end else - %(#{@number}#{append}) + title end end @@ -164,17 +157,65 @@ # # Returns The parent Block def << block - assign_index block if block.context == :section + assign_numeral block if block.context == :section super end def to_s - if @title != nil - qualified_title = @numbered ? %(#{sectnum} #{@title}) : @title - %(#<#{self.class}@#{object_id} {level: #{@level}, title: #{qualified_title.inspect}, blocks: #{@blocks.size}}>) + if @title + formal_title = @numbered ? %(#{sectnum} #{@title}) : @title + %(#<#{self.class}@#{object_id} {level: #{@level}, title: #{formal_title.inspect}, blocks: #{@blocks.size}}>) else super end end + + # Public: Generate a String ID from the given section title. + # + # The generated ID is prefixed with value of the 'idprefix' attribute, which + # is an underscore (_) by default. Invalid characters are then removed and + # spaces are replaced with the value of the 'idseparator' attribute, which is + # an underscore (_) by default. + # + # If the generated ID is already in use in the document, a count is appended, + # offset by the separator, until a unique ID is found. + # + # Section ID generation can be disabled by unsetting the 'sectids' document attribute. + # + # Examples + # + # Section.generate_id 'Foo', document + # => "_foo" + # + # Returns the generated [String] ID. + def self.generate_id title, document + attrs = document.attributes + pre = attrs['idprefix'] || '_' + if (sep = attrs['idseparator']) + if sep.length == 1 || (!(no_sep = sep.empty?) && (sep = attrs['idseparator'] = sep.chr)) + sep_sub = sep == '-' || sep == '.' ? ' .-' : %( #{sep}.-) + end + else + sep, sep_sub = '_', ' _.-' + end + gen_id = %(#{pre}#{title.downcase.gsub InvalidSectionIdCharsRx, ''}) + if no_sep + gen_id = gen_id.delete ' ' + else + # replace space with separator and remove repeating and trailing separator characters + gen_id = gen_id.tr_s sep_sub, sep + gen_id = gen_id.chop if gen_id.end_with? sep + # ensure id doesn't begin with idseparator if idprefix is empty (assuming idseparator is not empty) + gen_id = gen_id.slice 1, gen_id.length if pre.empty? && (gen_id.start_with? sep) + end + if document.catalog[:refs].key? 
gen_id + ids = document.catalog[:refs] + cnt = Compliance.unique_id_start_index + cnt += 1 while ids[candidate_id = %(#{gen_id}#{sep}#{cnt})] + candidate_id + else + gen_id + end + end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/stylesheets.rb asciidoctor-2.0.10/lib/asciidoctor/stylesheets.rb --- asciidoctor-1.5.5/lib/asciidoctor/stylesheets.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/stylesheets.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # A utility class for working with the built-in stylesheets. #-- @@ -6,8 +6,7 @@ # QUESTION create method for user stylesheet? class Stylesheets DEFAULT_STYLESHEET_NAME = 'asciidoctor.css' - DEFAULT_PYGMENTS_STYLE = 'default' - STYLESHEETS_DATA_PATH = ::File.join DATA_PATH, 'stylesheets' + STYLESHEETS_DIR = ::File.join DATA_DIR, 'stylesheets' @__instance__ = new @@ -23,77 +22,68 @@ # # returns the [String] Asciidoctor stylesheet data def primary_stylesheet_data - @primary_stylesheet_data ||= ::IO.read(::File.join(STYLESHEETS_DATA_PATH, 'asciidoctor-default.css')).chomp + @primary_stylesheet_data ||= (::File.read (::File.join STYLESHEETS_DIR, 'asciidoctor-default.css'), mode: FILE_READ_MODE).rstrip end + # Deprecated: Generate code to embed the primary stylesheet + # + # Returns the [String] primary stylesheet data wrapped in a ) end - def write_primary_stylesheet target_dir - ::File.open(::File.join(target_dir, primary_stylesheet_name), 'w') {|f| f.write primary_stylesheet_data } + def write_primary_stylesheet target_dir = '.' + ::File.write (::File.join target_dir, primary_stylesheet_name), primary_stylesheet_data, mode: FILE_WRITE_MODE end def coderay_stylesheet_name - 'coderay-asciidoctor.css' + (SyntaxHighlighter.for 'coderay').stylesheet_basename end # Public: Read the contents of the default CodeRay stylesheet # # returns the [String] CodeRay stylesheet data def coderay_stylesheet_data - # NOTE use the following lines to load a built-in theme instead - # unless load_coderay.nil? - # ::CodeRay::Encoders[:html]::CSS.new(:default).stylesheet - # end - @coderay_stylesheet_data ||= ::IO.read(::File.join(STYLESHEETS_DATA_PATH, 'coderay-asciidoctor.css')).chomp + (SyntaxHighlighter.for 'coderay').read_stylesheet end + # Deprecated: Generate code to embed the CodeRay stylesheet + # + # Returns the [String] CodeRay stylesheet data wrapped in a ) end - def write_coderay_stylesheet target_dir - ::File.open(::File.join(target_dir, coderay_stylesheet_name), 'w') {|f| f.write coderay_stylesheet_data } + def write_coderay_stylesheet target_dir = '.' + ::File.write (::File.join target_dir, coderay_stylesheet_name), coderay_stylesheet_data, mode: FILE_WRITE_MODE end def pygments_stylesheet_name style = nil - %(pygments-#{style || DEFAULT_PYGMENTS_STYLE}.css) + (SyntaxHighlighter.for 'pygments').stylesheet_basename style end # Public: Generate the Pygments stylesheet with the specified style. # # returns the [String] Pygments stylesheet data def pygments_stylesheet_data style = nil - if load_pygments - (@pygments_stylesheet_data ||= {})[style || DEFAULT_PYGMENTS_STYLE] ||= - (::Pygments.css '.listingblock .pygments', :classprefix => 'tok-', :style => (style || DEFAULT_PYGMENTS_STYLE)). - sub('.listingblock .pygments {', '.listingblock .pygments, .listingblock .pygments code {') - else - '/* Pygments styles disabled. Pygments is not available. 
*/' - end + (SyntaxHighlighter.for 'pygments').read_stylesheet style end + # Deprecated: Generate code to embed the Pygments stylesheet + # + # Returns the [String] Pygments stylesheet data for the specified style wrapped in a <style> tag + def embed_pygments_stylesheet style = nil + %(<style> + #{pygments_stylesheet_data style} + </style>) end - def write_pygments_stylesheet target_dir, style = nil - ::File.open(::File.join(target_dir, pygments_stylesheet_name(style)), 'w') {|f| f.write pygments_stylesheet_data(style) } - end - - #def load_coderay - # (defined? ::CodeRay) ? true : !(Helpers.require_library 'coderay', true, :ignore).nil? - #end - - def load_pygments - (defined? ::Pygments) ? true : !(Helpers.require_library 'pygments', 'pygments.rb', :ignore).nil? + def write_pygments_stylesheet target_dir = '.', style = nil + ::File.write (::File.join target_dir, (pygments_stylesheet_name style)), (pygments_stylesheet_data style), mode: FILE_WRITE_MODE end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/substitutors.rb asciidoctor-2.0.10/lib/asciidoctor/substitutors.rb --- asciidoctor-1.5.5/lib/asciidoctor/substitutors.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/substitutors.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,108 +1,91 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Methods to perform substitutions on lines of AsciiDoc text. This module # is intented to be mixed-in to Section and Block to provide operations for performing # the necessary substitutions. module Substitutors + SpecialCharsRx = /[<&>]/ + SpecialCharsTr = { '>' => '&gt;', '<' => '&lt;', '&' => '&amp;' } - SPECIAL_CHARS = { - '&' => '&amp;', - '<' => '&lt;', - '>' => '&gt;' - } - - SPECIAL_CHARS_PATTERN = /[#{SPECIAL_CHARS.keys.join}]/ - - SUBS = { - :basic => [:specialcharacters], - :normal => [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], - :verbatim => [:specialcharacters, :callouts], - :title => [:specialcharacters, :quotes, :replacements, :macros, :attributes, :post_replacements], - :header => [:specialcharacters, :attributes], - # by default, AsciiDoc performs :attributes and :macros on a pass block - # TODO make this a compliance setting - :pass => [] - } + # Detects if text is a possible candidate for the quotes substitution.
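# Editor's sketch (not part of the patch): a quick way to read the sniff table below is
#
#   'plain text' =~ QuotedTextSniffRx[false]    # => nil (quotes substitution can be skipped)
#   '*strong* text' =~ QuotedTextSniffRx[false] # => 0   (candidate, so the substitution runs)
#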
+ QuotedTextSniffRx = { false => /[*_`#^~]/, true => /[*'_+#^~]/ } - COMPOSITE_SUBS = { - :none => [], - :normal => SUBS[:normal], - :verbatim => SUBS[:verbatim], - :specialchars => [:specialcharacters] + (BASIC_SUBS = [:specialcharacters]).freeze + (HEADER_SUBS = [:specialcharacters, :attributes]).freeze + (NO_SUBS = []).freeze + (NORMAL_SUBS = [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements]).freeze + (REFTEXT_SUBS = [:specialcharacters, :quotes, :replacements]).freeze + (VERBATIM_SUBS = [:specialcharacters, :callouts]).freeze + + SUB_GROUPS = { + none: NO_SUBS, + normal: NORMAL_SUBS, + verbatim: VERBATIM_SUBS, + specialchars: BASIC_SUBS, } - SUB_SYMBOLS = { - :a => :attributes, - :m => :macros, - :n => :normal, - :p => :post_replacements, - :q => :quotes, - :r => :replacements, - :c => :specialcharacters, - :v => :verbatim + SUB_HINTS = { + a: :attributes, + m: :macros, + n: :normal, + p: :post_replacements, + q: :quotes, + r: :replacements, + c: :specialcharacters, + v: :verbatim, } SUB_OPTIONS = { - :block => COMPOSITE_SUBS.keys + SUBS[:normal] + [:callouts], - :inline => COMPOSITE_SUBS.keys + SUBS[:normal] + block: SUB_GROUPS.keys + NORMAL_SUBS + [:callouts], + inline: SUB_GROUPS.keys + NORMAL_SUBS, } - SUB_HIGHLIGHT = ['coderay', 'pygments'] + CAN = ?\u0018 + DEL = ?\u007f # Delimiters and matchers for the passthrough placeholder # See http://www.aivosto.com/vbtips/control-characters.html#listabout for characters to use # SPA, start of guarded protected area (\u0096) - PASS_START = "\u0096" + PASS_START = ?\u0096 # EPA, end of guarded protected area (\u0097) - PASS_END = "\u0097" + PASS_END = ?\u0097 - # match placeholder record - PASS_MATCH = /\u0096(\d+)\u0097/ + # match passthrough slot + PassSlotRx = /#{PASS_START}(\d+)#{PASS_END}/ - # fix placeholder record after syntax highlighting - PASS_MATCH_HI = /]*>\u0096<\/span>[^\d]*(\d+)[^\d]*]*>\u0097<\/span>/ + # fix passthrough slot after syntax highlighting + HighlightedPassSlotRx = %r(]*>#{PASS_START}[^\d]*(\d+)[^\d]*]*>#{PASS_END}) - # Internal: A String Array of passthough (unprocessed) text captured from this block - attr_reader :passthroughs - - # Public: Apply the specified substitutions to the lines of text - # - # source - The String or String Array of text to process - # subs - The substitutions to perform. Can be a Symbol or a Symbol Array (default: :normal) - # expand - A Boolean to control whether sub aliases are expanded (default: true) - # - # returns Either a String or String Array, whichever matches the type of the first argument - def apply_subs source, subs = :normal, expand = false - if !subs - return source - elsif subs == :normal - subs = SUBS[:normal] - elsif expand - if ::Symbol === subs - subs = COMPOSITE_SUBS[subs] || [subs] - else - effective_subs = [] - subs.each do |key| - if COMPOSITE_SUBS.has_key? key - effective_subs += COMPOSITE_SUBS[key] - else - effective_subs << key - end - end + RS = '\\' - subs = effective_subs - end - end + R_SB = ']' + + ESC_R_SB = '\]' + + PLUS = '+' - return source if subs.empty? + # Public: Apply the specified substitutions to the text. + # + # text - The String or String Array of text to process; must not be nil. + # subs - The substitutions to perform; must be a Symbol Array or nil (default: NORMAL_SUBS). + # + # Returns a String or String Array to match the type of the text argument with substitutions applied. + def apply_subs text, subs = NORMAL_SUBS + return text if text.empty? || !subs - text = (multiline = ::Array === source) ? 
source * EOL : source + if (is_multiline = ::Array === text) + text = text[1] ? (text.join LF) : text[0] + end - if (has_passthroughs = subs.include? :macros) + if subs.include? :macros text = extract_passthroughs text - has_passthroughs = false if @passthroughs.empty? + unless @passthroughs.empty? + passthrus = @passthroughs + # NOTE placeholders can move around, so we can only clear in the outermost substitution call + @passthroughs_locked ||= (clear_passthrus = true) + end end subs.each do |type| @@ -112,7 +95,7 @@ when :quotes text = sub_quotes text when :attributes - text = sub_attributes(text.split EOL) * EOL + text = sub_attributes text if text.include? ATTR_REF_HEAD when :replacements text = sub_replacements text when :macros @@ -124,399 +107,183 @@ when :post_replacements text = sub_post_replacements text else - warn %(asciidoctor: WARNING: unknown substitution type #{type}) + logger.warn %(unknown substitution type #{type}) end end - text = restore_passthroughs text if has_passthroughs - multiline ? (text.split EOL) : text + if passthrus + text = restore_passthroughs text + if clear_passthrus + passthrus.clear + @passthroughs_locked = nil + end + end + + is_multiline ? (text.split LF, -1) : text end # Public: Apply normal substitutions. # - # lines - The lines of text to process. Can be a String or a String Array - # - # returns - A String with normal substitutions performed - def apply_normal_subs(lines) - apply_subs(::Array === lines ? lines * EOL : lines) - end - - # Public: Apply substitutions for titles. + # An alias for apply_subs with default remaining arguments. # - # title - The String title to process + # text - The String text to which to apply normal substitutions # - # returns - A String with title substitutions performed - def apply_title_subs(title) - apply_subs title, SUBS[:title] + # Returns the String with normal substitutions applied. + def apply_normal_subs text + apply_subs text, NORMAL_SUBS end # Public: Apply substitutions for header metadata and attribute assignments # # text - String containing the text process # - # returns - A String with header substitutions performed - def apply_header_subs(text) - apply_subs text, SUBS[:header] + # Returns A String with header substitutions performed + def apply_header_subs text + apply_subs text, HEADER_SUBS end - # Internal: Extract the passthrough text from the document for reinsertion after processing. + # Public: Apply substitutions for titles. # - # text - The String from which to extract passthrough fragements + # title - The String title to process # - # returns - The text with the passthrough region substituted with placeholders - def extract_passthroughs(text) - compat_mode = @document.compat_mode - text = text.gsub(PassInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - preceding = nil - - if (boundary = m[4]).nil_or_empty? # pass:[] - if m[6] == '\\' - # NOTE we don't look for nested pass:[] macros - next m[0][1..-1] - end - - @passthroughs[pass_key = @passthroughs.size] = {:text => (unescape_brackets m[8]), :subs => (m[7].nil_or_empty? ? [] : (resolve_pass_subs m[7]))} - else # $$, ++ or +++ - # skip ++ in compat mode, handled as normal quoted text - if compat_mode && boundary == '++' - next m[2].nil_or_empty? ? 
- %(#{m[1]}#{m[3]}++#{extract_passthroughs m[5]}++) : - %(#{m[1]}[#{m[2]}]#{m[3]}++#{extract_passthroughs m[5]}++) - end - - attributes = m[2] - - # fix non-matching group results in Opal under Firefox - if ::RUBY_ENGINE_OPAL - attributes = nil if attributes == '' - end - - escape_count = m[3].size - content = m[5] - old_behavior = false - - if attributes - if escape_count > 0 - # NOTE we don't look for nested unconstrained pass macros - # must enclose string following next in " for Opal - next "#{m[1]}[#{attributes}]#{'\\' * (escape_count - 1)}#{boundary}#{m[5]}#{boundary})" - elsif m[1] == '\\' - preceding = %([#{attributes}]) - attributes = nil - else - if boundary == '++' && (attributes.end_with? 'x-') - old_behavior = true - attributes = attributes[0...-2] - end - attributes = parse_attributes attributes - end - elsif escape_count > 0 - # NOTE we don't look for nested unconstrained pass macros - # must enclose string following next in " for Opal - next "#{m[1]}[#{attributes}]#{'\\' * (escape_count - 1)}#{boundary}#{m[5]}#{boundary}" - end - subs = (boundary == '+++' ? [] : [:specialcharacters]) - - pass_key = @passthroughs.size - if attributes - if old_behavior - @passthroughs[pass_key] = {:text => content, :subs => SUBS[:normal], :type => :monospaced, :attributes => attributes} - else - @passthroughs[pass_key] = {:text => content, :subs => subs, :type => :unquoted, :attributes => attributes} - end - else - @passthroughs[pass_key] = {:text => content, :subs => subs} - end - end - - %(#{preceding}#{PASS_START}#{pass_key}#{PASS_END}) - } if (text.include? '++') || (text.include? '$$') || (text.include? 'ss:') - - pass_inline_char1, pass_inline_char2, pass_inline_rx = PassInlineRx[compat_mode] - text = text.gsub(pass_inline_rx) { - # alias match for Ruby 1.8.7 compat - m = $~ - preceding = m[1] - attributes = m[2] - escape_mark = (m[3].start_with? '\\') ? '\\' : nil - format_mark = m[4] - content = m[5] - - # fix non-matching group results in Opal under Firefox - if ::RUBY_ENGINE_OPAL - attributes = nil if attributes == '' - end - - if compat_mode - old_behavior = true - else - if (old_behavior = (attributes && (attributes.end_with? 'x-'))) - attributes = attributes[0...-2] - end - end - - if attributes - if format_mark == '`' && !old_behavior - # must enclose string following next in " for Opal - next "#{preceding}[#{attributes}]#{escape_mark}`#{extract_passthroughs content}`" - end - - if escape_mark - # honor the escape of the formatting mark (must enclose string following next in " for Opal) - next "#{preceding}[#{attributes}]#{m[3][1..-1]}" - elsif preceding == '\\' - # honor the escape of the attributes - preceding = %([#{attributes}]) - attributes = nil - else - attributes = parse_attributes attributes - end - elsif format_mark == '`' && !old_behavior - # must enclose string following next in " for Opal - next "#{preceding}#{escape_mark}`#{extract_passthroughs content}`" - elsif escape_mark - # honor the escape of the formatting mark (must enclose string following next in " for Opal) - next "#{preceding}#{m[3][1..-1]}" - end - - pass_key = @passthroughs.size - if compat_mode - @passthroughs[pass_key] = {:text => content, :subs => [:specialcharacters], :attributes => attributes, :type => :monospaced} - elsif attributes - if old_behavior - subs = (format_mark == '`' ? 
[:specialcharacters] : SUBS[:normal]) - @passthroughs[pass_key] = {:text => content, :subs => subs, :attributes => attributes, :type => :monospaced} - else - @passthroughs[pass_key] = {:text => content, :subs => [:specialcharacters], :attributes => attributes, :type => :unquoted} - end - else - @passthroughs[pass_key] = {:text => content, :subs => [:specialcharacters]} - end - - %(#{preceding}#{PASS_START}#{pass_key}#{PASS_END}) - } if (text.include? pass_inline_char1) || (pass_inline_char2 && (text.include? pass_inline_char2)) - - # NOTE we need to do the stem in a subsequent step to allow it to be escaped by the former - text = text.gsub(StemInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] - end - - if (type = m[1].to_sym) == :stem - type = ((default_stem_type = document.attributes['stem']).nil_or_empty? ? 'asciimath' : default_stem_type).to_sym - end - content = unescape_brackets m[3] - if m[2].nil_or_empty? - subs = (@document.basebackend? 'html') ? [:specialcharacters] : [] - else - subs = resolve_pass_subs m[2] - end - - @passthroughs[pass_key = @passthroughs.size] = {:text => content, :subs => subs, :type => type} - %(#{PASS_START}#{pass_key}#{PASS_END}) - } if (text.include? ':') && ((text.include? 'stem:') || (text.include? 'math:')) - - text - end + # Returns A String with title substitutions performed + alias apply_title_subs apply_subs - # Internal: Restore the passthrough text by reinserting into the placeholder positions + # Public: Apply substitutions for reftext. # - # text - The String text into which to restore the passthrough text - # outer - A Boolean indicating whether we are in the outer call (default: true) + # text - The String to process # - # returns The String text with the passthrough text restored - def restore_passthroughs text, outer = true - if outer && (@passthroughs.empty? || !text.include?(PASS_START)) - return text - end - - text.gsub(PASS_MATCH) { - # NOTE we can't remove entry from map because placeholder may have been duplicated by other substitutions - pass = @passthroughs[$~[1].to_i] - subbed_text = (subs = pass[:subs]) ? apply_subs(pass[:text], subs) : pass[:text] - if (type = pass[:type]) - subbed_text = Inline.new(self, :quoted, subbed_text, :type => type, :attributes => pass[:attributes]).convert - end - subbed_text.include?(PASS_START) ? restore_passthroughs(subbed_text, false) : subbed_text - } - ensure - # free memory if in outer call...we don't need these anymore - @passthroughs.clear if outer + # Returns a String with all substitutions from the reftext substitution group applied + def apply_reftext_subs text + apply_subs text, REFTEXT_SUBS end # Public: Substitute special characters (i.e., encode XML) # - # Special characters are defined in the Asciidoctor::SPECIAL_CHARS Array constant + # The special characters <, &, and > get replaced with <, &, and >, respectively. # - # text - The String text to process + # text - The String text to process. # - # returns The String text with special characters replaced - def sub_specialchars(text) - SUPPORTS_GSUB_RESULT_HASH ? - text.gsub(SPECIAL_CHARS_PATTERN, SPECIAL_CHARS) : - text.gsub(SPECIAL_CHARS_PATTERN) { SPECIAL_CHARS[$&] } - end - alias :sub_specialcharacters :sub_specialchars - + # Returns The String text with special characters replaced. 
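As a point of reference, a minimal sketch of how this substitution behaves when driven through the public convert API (output markup abbreviated; assumes the asciidoctor gem is installed):

    require 'asciidoctor'

    # '<', '&', and '>' in ordinary text are encoded as XML entities
    html = Asciidoctor.convert 'AT&T <javascript>', safe: :safe
    # html contains "AT&amp;T &lt;javascript&gt;" inside the paragraph markup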
if RUBY_ENGINE == 'opal' - def sub_quotes text - QUOTE_SUBS[@document.compat_mode].each do |type, scope, pattern| - text = text.gsub(pattern) { convert_quoted_text $~, type, scope } - end - text - end - - def sub_replacements text - REPLACEMENTS.each do |pattern, replacement, restore| - text = text.gsub(pattern) { do_replacement $~, replacement, restore } - end - text + def sub_specialchars text + (text.include? ?>) || (text.include? ?&) || (text.include? ?<) ? (text.gsub SpecialCharsRx, SpecialCharsTr) : text end else - # Public: Substitute quoted text (includes emphasis, strong, monospaced, etc) - # - # text - The String text to process - # - # returns The converted String text - def sub_quotes text - # NOTE interpolation is faster than String#dup - text = %(#{text}) - # NOTE using gsub! here as an MRI Ruby optimization - QUOTE_SUBS[@document.compat_mode].each do |type, scope, pattern| - text.gsub!(pattern) { convert_quoted_text $~, type, scope } - end - text - end - - # Public: Substitute replacement characters (e.g., copyright, trademark, etc) - # - # text - The String text to process - # - # returns The String text with the replacement characters substituted - def sub_replacements text - # NOTE interpolation is faster than String#dup - text = %(#{text}) - # NOTE Using gsub! as optimization - REPLACEMENTS.each do |pattern, replacement, restore| - text.gsub!(pattern) { do_replacement $~, replacement, restore } + CGI = ::CGI + def sub_specialchars text + if (text.include? ?>) || (text.include? ?&) || (text.include? ?<) + (text.include? ?') || (text.include? ?") ? (text.gsub SpecialCharsRx, SpecialCharsTr) : (CGI.escape_html text) + else + text end - text end end + alias sub_specialcharacters sub_specialchars - # Internal: Substitute replacement text for matched location + # Public: Substitute quoted text (includes emphasis, strong, monospaced, etc.) # - # returns The String text with the replacement characters substituted - def do_replacement m, replacement, restore - if (matched = m[0]).include? '\\' - matched.tr '\\', '' - else - case restore - when :none - replacement - when :leading - %(#{m[1]}#{replacement}) - when :bounding - %(#{m[1]}#{replacement}#{m[2]}) + # text - The String text to process + # + # returns The converted [String] text + def sub_quotes text + if QuotedTextSniffRx[compat = @document.compat_mode].match? text + QUOTE_SUBS[compat].each do |type, scope, pattern| + text = text.gsub(pattern) { convert_quoted_text $~, type, scope } end end + text end - # Public: Substitute attribute references + # Public: Substitutes attribute references in the specified text # # Attribute references are in the format +{name}+. # - # If an attribute referenced in the line is missing, the line is dropped. + # If an attribute referenced in the line is missing or undefined, the line may be dropped + # based on the attribute-missing or attribute-undefined setting, respectively. 
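A small illustration of that setting, assuming the stock Asciidoctor API; with attribute-missing set to drop-line the whole line disappears, while the default (skip) leaves the reference in place:

    require 'asciidoctor'

    # default ('skip'): the unresolved reference is left in the output as-is
    Asciidoctor.convert 'built for {target-platform}', safe: :safe

    # 'drop-line': the line containing the unresolved reference is discarded
    Asciidoctor.convert 'built for {target-platform}', safe: :safe,
      attributes: { 'attribute-missing' => 'drop-line' }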
# - # text - The String text to process + # text - The String text to process + # opts - A Hash of options to control processing: (default: {}) + # * :attribute_missing controls how to handle a missing attribute (see Compliance.attribute_missing for values) + # * :drop_line_severity the severity level at which to log a dropped line (:info or :ignore) # - # returns The String text with the attribute references replaced with attribute values - #-- - # NOTE it's necessary to perform this substitution line-by-line - # so that a missing key doesn't wipe out the whole block of data - # when attribute-undefined and/or attribute-missing is drop-line - def sub_attributes data, opts = {} - return data if data.nil_or_empty? - - # normalizes data type to an array (string becomes single-element array) - if (string_data = ::String === data) - data = [data] - end - + # Returns the [String] text with the attribute references replaced with resolved values + def sub_attributes text, opts = {} doc_attrs = @document.attributes - attribute_missing = nil - result = [] - data.each do |line| - reject = false - reject_if_empty = false - line = line.gsub(AttributeReferenceRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # escaped attribute, return unescaped - if m[1] == '\\' || m[4] == '\\' - %({#{m[2]}}) - elsif !m[3].nil_or_empty? - offset = (directive = m[3]).length + 1 - expr = m[2][offset..-1] - case directive - when 'set' - args = expr.split(':') - _, value = Parser.store_attribute(args[0], args[1] || '', @document) - unless value - # since this is an assignment, only drop-line applies here (skip and drop imply the same result) - if doc_attrs.fetch('attribute-undefined', Compliance.attribute_undefined) == 'drop-line' - reject = true - break '' - end - end - reject_if_empty = true - '' - when 'counter', 'counter2' - args = expr.split(':') - val = @document.counter(args[0], args[1]) - if directive == 'counter2' - reject_if_empty = true - '' - else - val - end - else - # if we get here, our AttributeReference regex is too loose - warn %(asciidoctor: WARNING: illegal attribute directive: #{m[3]}) - m[0] - end - elsif doc_attrs.key?(key = m[2].downcase) - doc_attrs[key] - elsif INTRINSIC_ATTRIBUTES.key? key - INTRINSIC_ATTRIBUTES[key] - else - case (attribute_missing ||= (opts[:attribute_missing] || doc_attrs.fetch('attribute-missing', Compliance.attribute_missing))) - when 'skip' - m[0] - when 'drop-line' - warn %(asciidoctor: WARNING: dropping line containing reference to missing attribute: #{key}) - reject = true - break '' - when 'warn' - warn %(asciidoctor: WARNING: skipping reference to missing attribute: #{key}) - m[0] - else # 'drop' - # QUESTION should we warn in this case? 
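The counter and set directives handled a few lines above can be exercised directly from document text; a rough sketch (the attribute and counter names are arbitrary):

    require 'asciidoctor'

    doc_text = <<~'ADOC'
    {set:product:Asciidoctor}
    {product} build {counter:build-number}, then build {counter:build-number}.
    ADOC
    Asciidoctor.convert doc_text, safe: :safe
    # the paragraph reads "Asciidoctor build 1, then build 2."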
- reject_if_empty = true - '' - end + drop = drop_line = drop_line_severity = drop_empty_line = attribute_undefined = attribute_missing = nil + text = text.gsub AttributeReferenceRx do + # escaped attribute, return unescaped + if $1 == RS || $4 == RS + %({#{$2}}) + elsif $3 + case (args = $2.split ':', 3).shift + when 'set' + _, value = Parser.store_attribute args[0], args[1] || '', @document + # NOTE since this is an assignment, only drop-line applies here (skip and drop imply the same result) + if value || (attribute_undefined ||= (doc_attrs['attribute-undefined'] || Compliance.attribute_undefined)) != 'drop-line' + drop = drop_empty_line = DEL + else + drop = drop_line = CAN + end + when 'counter2' + @document.counter(*args) + drop = drop_empty_line = DEL + else # 'counter' + @document.counter(*args) + end + elsif doc_attrs.key?(key = $2.downcase) + doc_attrs[key] + elsif (value = INTRINSIC_ATTRIBUTES[key]) + value + else + case (attribute_missing ||= (opts[:attribute_missing] || doc_attrs['attribute-missing'] || Compliance.attribute_missing)) + when 'drop' + drop = drop_empty_line = DEL + when 'drop-line' + if (drop_line_severity ||= (opts[:drop_line_severity] || :info)) == :info + logger.info { %(dropping line containing reference to missing attribute: #{key}) } + #elsif drop_line_severity == :warn + # logger.warn %(dropping line containing reference to missing attribute: #{key}) + end + drop = drop_line = CAN + when 'warn' + logger.warn %(skipping reference to missing attribute: #{key}) + $& + else # 'skip' + $& end - } if line.include? '{' + end + end - result << line unless reject || (reject_if_empty && line.empty?) + if drop + # drop lines from text + if drop_empty_line + lines = (text.squeeze DEL).split LF, -1 + if drop_line + (lines.reject {|line| line == DEL || line == CAN || (line.start_with? CAN) || (line.include? CAN) }.join LF).delete DEL + else + (lines.reject {|line| line == DEL }.join LF).delete DEL + end + elsif text.include? LF + (text.split LF, -1).reject {|line| line == CAN || (line.start_with? CAN) || (line.include? CAN) }.join LF + else + '' + end + else + text end + end - string_data ? result * EOL : result + # Public: Substitute replacement characters (e.g., copyright, trademark, etc.) + # + # text - The String text to process + # + # returns The [String] text with the replacement characters substituted + def sub_replacements text + REPLACEMENTS.each do |pattern, replacement, restore| + text = text.gsub(pattern) { do_replacement $~, replacement, restore } + end if ReplaceableTextRx.match? text + text end # Public: Substitute inline macros (e.g., links, images, etc) @@ -526,808 +293,882 @@ # source - The String text to process # # returns The converted String text - def sub_macros(source) - return source if source.nil_or_empty? - + def sub_macros text + #return text if text.nil_or_empty? 
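The rewritten sub_macros first dispatches to registered inline macro extensions, as shown at the top of the method body that follows; a minimal, illustrative registration using the extensions DSL (the issue macro name and URL are invented for this example):

    require 'asciidoctor'

    Asciidoctor::Extensions.register do
      inline_macro do
        named :issue
        process do |parent, target, attrs|
          # render issue:42[] as a link whose text is "#42"
          create_anchor parent, %(##{target}), type: :link, target: %(https://issues.example.org/#{target})
        end
      end
    end

    Asciidoctor.convert 'See issue:42[] for details.', safe: :safe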
# some look ahead assertions to cut unnecessary regex calls - found = {} - found[:square_bracket] = source.include?('[') - found[:round_bracket] = source.include?('(') - found[:colon] = found_colon = source.include?(':') - found[:macroish] = (found[:square_bracket] && found_colon) - found[:macroish_short_form] = (found[:square_bracket] && found_colon && source.include?(':[')) - use_link_attrs = @document.attributes.has_key?('linkattrs') - experimental = @document.attributes.has_key?('experimental') - - # NOTE interpolation is faster than String#dup - result = %(#{source}) - - if experimental - if found[:macroish_short_form] && (result.include?('kbd:') || result.include?('btn:')) - result = result.gsub(KbdBtnInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if (captured = m[0]).start_with? '\\' - next captured[1..-1] - end + found_square_bracket = text.include? '[' + found_colon = text.include? ':' + found_macroish = found_square_bracket && found_colon + found_macroish_short = found_macroish && (text.include? ':[') + doc_attrs = (doc = @document).attributes - if captured.start_with?('kbd') - keys = unescape_bracketed_text m[1] - - if keys == '+' - keys = ['+'] + # TODO allow position of substitution to be controlled (before or after other macros) + # TODO this handling needs some cleanup + if (extensions = doc.extensions) && extensions.inline_macros? # && found_macroish + extensions.inline_macros.each do |extension| + text = text.gsub extension.instance.regexp do + # honor the escape + next $&.slice 1, $&.length if (match = $&).start_with? RS + if $~.names.empty? + target, content = $1, $2 + else + target, content = ($~[:target] rescue nil), ($~[:content] rescue nil) + end + attributes = (default_attrs = (ext_config = extension.config)[:default_attrs]) ? default_attrs.merge : {} + if content + if content.empty? + attributes['text'] = content unless ext_config[:content_model] == :attributes else - # need to use closure to work around lack of negative lookbehind - keys = keys.split(KbdDelimiterRx).inject([]) {|c, key| - if key.end_with?('++') - c << key[0..-3].strip - c << '+' - else - c << key.strip - end - c - } + content = normalize_text content, true, true + # QUESTION should we store the unparsed attrlist in the attrlist key? + if ext_config[:content_model] == :attributes + parse_attributes content, ext_config[:positional_attrs] || ext_config[:pos_attrs] || [], into: attributes + else + attributes['text'] = content + end + end + # NOTE for convenience, map content (unparsed attrlist) to target when format is short + target ||= ext_config[:format] == :short ? content : target + end + if (Inline === (replacement = extension.process_method[self, target, attributes])) + if (inline_subs = replacement.attributes.delete 'subs') + replacement.text = apply_subs replacement.text, (expand_subs inline_subs) end - Inline.new(self, :kbd, nil, :attributes => {'keys' => keys}).convert - elsif captured.start_with?('btn') - label = unescape_bracketed_text m[1] - Inline.new(self, :button, label).convert + replacement.convert + elsif replacement + logger.info %(expected substitution value for custom inline macro to be of type Inline; got #{replacement.class}: #{match}) + replacement + else + '' end - } + end end + end - if found[:macroish] && result.include?('menu:') - result = result.gsub(MenuInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ + if doc_attrs.key? 'experimental' + if found_macroish_short && ((text.include? 'kbd:') || (text.include? 
'btn:')) + text = text.gsub InlineKbdBtnMacroRx do # honor the escape - if (captured = m[0]).start_with? '\\' - next captured[1..-1] + if $1 + $&.slice 1, $&.length + elsif $2 == 'kbd' + if (keys = $3.strip).include? R_SB + keys = keys.gsub ESC_R_SB, R_SB + end + if keys.length > 1 && (delim_idx = (delim_idx = keys.index ',', 1) ? + [delim_idx, (keys.index '+', 1)].compact.min : (keys.index '+', 1)) + delim = keys.slice delim_idx, 1 + # NOTE handle special case where keys ends with delimiter (e.g., Ctrl++ or Ctrl,,) + if keys.end_with? delim + keys = (keys.chop.split delim, -1).map {|key| key.strip } + keys[-1] += delim + else + keys = keys.split(delim).map {|key| key.strip } + end + else + keys = [keys] + end + (Inline.new self, :kbd, nil, attributes: { 'keys' => keys }).convert + else # $2 == 'btn' + (Inline.new self, :button, (normalize_text $3, true, true)).convert end + end + end - menu = m[1] - items = m[2] + if found_macroish && (text.include? 'menu:') + text = text.gsub InlineMenuMacroRx do + # honor the escape + next $&.slice 1, $&.length if $&.start_with? RS - if !items - submenus = [] - menuitem = nil - else + menu = $1 + if (items = $2) + items = items.gsub ESC_R_SB, R_SB if items.include? R_SB if (delim = items.include?('>') ? '>' : (items.include?(',') ? ',' : nil)) submenus = items.split(delim).map {|it| it.strip } menuitem = submenus.pop else - submenus = [] - menuitem = items.rstrip + submenus, menuitem = [], items.rstrip end + else + submenus, menuitem = [], nil end - Inline.new(self, :menu, nil, :attributes => {'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem}).convert - } + Inline.new(self, :menu, nil, attributes: { 'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem }).convert + end end - if result.include?('"') && result.include?('>') - result = result.gsub(MenuInlineRx) { - # alias match for Ruby 1.8.7 compat - m = $~ + if (text.include? '"') && (text.include? '>') + text = text.gsub InlineMenuRx do # honor the escape - if (captured = m[0]).start_with? '\\' - next captured[1..-1] - end + next $&.slice 1, $&.length if $&.start_with? RS - input = m[1] - - menu, *submenus = input.split('>').map {|it| it.strip } + menu, *submenus = $1.split('>').map {|it| it.strip } menuitem = submenus.pop - Inline.new(self, :menu, nil, :attributes => {'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem}).convert - } - end - end - - # FIXME this location is somewhat arbitrary, probably need to be able to control ordering - # TODO this handling needs some cleanup - if (extensions = @document.extensions) && extensions.inline_macros? # && found[:macroish] - extensions.inline_macros.each do |extension| - result = result.gsub(extension.instance.regexp) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[0].start_with? 
'\\' - next m[0][1..-1] - end - - target = m[1] - attributes = if extension.config[:format] == :short - # TODO if content_model is :attributes, set target to nil and parse attributes - # maybe if content_model is :text, we should put content into text attribute - {} - else - if extension.config[:content_model] == :attributes - parse_attributes m[2], (extension.config[:pos_attrs] || []), :sub_input => true, :unescape_input => true - else - { 'text' => (unescape_bracketed_text m[2]) } - end - end - extension.process_method[self, target, attributes] - } + Inline.new(self, :menu, nil, attributes: { 'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem }).convert + end end end - if found[:macroish] && (result.include?('image:') || result.include?('icon:')) + if found_macroish && ((text.include? 'image:') || (text.include? 'icon:')) # image:filename.png[Alt Text] - result = result.gsub(ImageInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ + text = text.gsub InlineImageMacroRx do # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] - end - - raw_attrs = unescape_bracketed_text m[2] - if m[0].start_with? 'icon:' - type = 'icon' - posattrs = ['size'] + if $&.start_with? RS + next $&.slice 1, $&.length + elsif $&.start_with? 'icon:' + type, posattrs = 'icon', ['size'] else - type = 'image' - posattrs = ['alt', 'width', 'height'] + type, posattrs = 'image', ['alt', 'width', 'height'] end - target = sub_attributes(m[1]) + target = $1 + attrs = parse_attributes $2, posattrs, unescape_input: true unless type == 'icon' - @document.register(:images, target) + doc.register :images, target + attrs['imagesdir'] = doc_attrs['imagesdir'] end - attrs = parse_attributes(raw_attrs, posattrs) - attrs['alt'] ||= Helpers.basename(target, true).tr('_-', ' ') - Inline.new(self, :image, nil, :type => type, :target => target, :attributes => attrs).convert - } + attrs['alt'] ||= (attrs['default-alt'] = Helpers.basename(target, true).tr('_-', ' ')) + Inline.new(self, :image, nil, type: type, target: target, attributes: attrs).convert + end end - if found[:macroish_short_form] || found[:round_bracket] - # indexterm:[Tigers,Big cats] + if ((text.include? '((') && (text.include? '))')) || (found_macroish_short && (text.include? 'dexterm')) # (((Tigers,Big cats))) - # indexterm2:[Tigers] + # indexterm:[Tigers,Big cats] # ((Tigers)) - result = result.gsub(IndextermInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - - # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] - end + # indexterm2:[Tigers] + text = text.gsub InlineIndextermMacroRx do + case $1 + when 'indexterm' + # honor the escape + next $&.slice 1, $&.length if $&.start_with? RS - # fix non-matching group results in Opal under Firefox - if ::RUBY_ENGINE_OPAL - m[1] = nil if m[1] == '' - end - - num_brackets = 0 - text_in_brackets = nil - unless (macro_name = m[1]) - text_in_brackets = m[3] - if (text_in_brackets.start_with? '(') && (text_in_brackets.end_with? ')') - text_in_brackets = text_in_brackets[1...-1] - num_brackets = 3 + # indexterm:[Tigers,Big cats] + if (attrlist = normalize_text $2, true, true).include? '=' + if (primary = (attrs = (AttributeList.new attrlist, self).parse)[1]) + attrs['terms'] = terms = [primary] + if (secondary = attrs[2]) + terms << secondary + if (tertiary = attrs[3]) + terms << tertiary + end + end + if (see_also = attrs['see-also']) + attrs['see-also'] = (see_also.include? ',') ? 
(see_also.split ',').map {|it| it.lstrip } : [see_also] + end + else + attrs = { 'terms' => (terms = attrlist) } + end else - num_brackets = 2 + attrs = { 'terms' => (terms = split_simple_csv attrlist) } end - end + #doc.register :indexterms, terms + (Inline.new self, :indexterm, nil, attributes: attrs).convert + when 'indexterm2' + # honor the escape + next $&.slice 1, $&.length if $&.start_with? RS - # non-visible - if macro_name == 'indexterm' || num_brackets == 3 - if !macro_name - # (((Tigers,Big cats))) - terms = split_simple_csv normalize_string(text_in_brackets) - else - # indexterm:[Tigers,Big cats] - terms = split_simple_csv normalize_string(m[2], true) + # indexterm2:[Tigers] + if (term = normalize_text $2, true, true).include? '=' + term = (attrs = (AttributeList.new term, self).parse)[1] || (attrs = nil) || term + if attrs && (see_also = attrs['see-also']) + attrs['see-also'] = (see_also.include? ',') ? (see_also.split ',').map {|it| it.lstrip } : [see_also] + end end - @document.register(:indexterms, [*terms]) - Inline.new(self, :indexterm, nil, :attributes => {'terms' => terms}).convert - # visible + #doc.register :indexterms, [term] + (Inline.new self, :indexterm, term, attributes: attrs, type: :visible).convert else - if !macro_name + text = $3 + # honor the escape + if $&.start_with? RS + # escape concealed index term, but process nested flow index term + if (text.start_with? '(') && (text.end_with? ')') + text = text.slice 1, text.length - 2 + visible, before, after = true, '(', ')' + else + next $&.slice 1, $&.length + end + else + visible = true + if text.start_with? '(' + if text.end_with? ')' + text, visible = (text.slice 1, text.length - 2), false + else + text, before, after = (text.slice 1, text.length), '(', '' + end + elsif text.end_with? ')' + text, before, after = text.chop, '', ')' + end + end + if visible # ((Tigers)) - text = normalize_string text_in_brackets + if (term = normalize_text text, true).include? ';&' + if term.include? ' >> ' + term, _, see = term.partition ' >> ' + attrs = { 'see' => see } + elsif term.include? ' &> ' + term, *see_also = term.split ' &> ' + attrs = { 'see-also' => see_also } + end + end + #doc.register :indexterms, [term] + subbed_term = (Inline.new self, :indexterm, term, attributes: attrs, type: :visible).convert else - # indexterm2:[Tigers] - text = normalize_string m[2], true + # (((Tigers,Big cats))) + attrs = {} + if (terms = normalize_text text, true).include? ';&' + if terms.include? ' >> ' + terms, _, see = terms.partition ' >> ' + attrs['see'] = see + elsif terms.include? ' &> ' + terms, *see_also = terms.split ' &> ' + attrs['see-also'] = see_also + end + end + attrs['terms'] = terms = split_simple_csv terms + #doc.register :indexterms, terms + subbed_term = (Inline.new self, :indexterm, nil, attributes: attrs).convert end - @document.register(:indexterms, [text]) - Inline.new(self, :indexterm, text, :type => :visible).convert + before ? %(#{before}#{subbed_term}#{after}) : subbed_term end - } + end end - if found_colon && (result.include? '://') + if found_colon && (text.include? '://') # inline urls, target[text] (optionally prefixed with link: and optionally surrounded by <>) - result = result.gsub(LinkInlineRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[2].start_with? 
'\\' - # must enclose string following next in " for Opal - next "#{m[1]}#{m[2][1..-1]}#{m[3]}" - end - # fix non-matching group results in Opal under Firefox - if ::RUBY_ENGINE_OPAL - m[3] = nil if m[3] == '' - end - # not a valid macro syntax w/o trailing square brackets - # we probably shouldn't even get here...our regex is doing too much - if m[1] == 'link:' && !m[3] - next m[0] - end - prefix = (m[1] != 'link:' ? m[1] : '') - target = m[2] - suffix = '' - unless m[3] || target !~ UriTerminator - case $~[0] + text = text.gsub InlineLinkRx do + if (target = $2).start_with? RS + # honor the escape + next %(#{$1}#{target.slice 1, target.length}#{$4}) + end + + prefix, suffix = $1, '' + # NOTE if $4 is set, then we're looking at a formal macro + if $4 + prefix = '' if prefix == 'link:' + text = $4 + else + # invalid macro syntax (link: prefix w/o trailing square brackets) + # FIXME we probably shouldn't even get here...our regex is doing too much + next $& if prefix == 'link:' + text = '' + case $3 when ')' - # strip the trailing ) - target = target[0..-2] + # move trailing ) out of URL + target = target.chop suffix = ')' + # NOTE handle case when modified target is a URI scheme (e.g., http://) + next $& if target.end_with? '://' when ';' - # strip the <> around the link - if prefix.start_with?('<') && target.end_with?('>') - prefix = prefix[4..-1] - target = target[0..-5] - # strip the ); from the end of the link - elsif target.end_with?(');') - target = target[0..-3] + if (prefix.start_with? '<') && (target.end_with? '>') + # move surrounding <> out of URL + prefix = prefix.slice 4, prefix.length + target = target.slice 0, target.length - 4 + elsif (target = target.chop).end_with? ')' + # move trailing ); out of URL + target = target.chop suffix = ');' else - target = target[0..-2] + # move trailing ; out of URL suffix = ';' end + # NOTE handle case when modified target is a URI scheme (e.g., http://) + next $& if target.end_with? '://' when ':' - # strip the ): from the end of the link - if target.end_with?('):') - target = target[0..-3] + if (target = target.chop).end_with? ')' + # move trailing ): out of URL + target = target.chop suffix = '):' else - target = target[0..-2] + # move trailing : out of URL suffix = ':' end + # NOTE handle case when modified target is a URI scheme (e.g., http://) + next $& if target.end_with? '://' end end - @document.register(:links, target) - link_opts = { :type => :link, :target => target } - attrs = nil - #text = m[3] ? sub_attributes(m[3].gsub('\]', ']')) : '' - if m[3].nil_or_empty? - text = '' - else - if use_link_attrs && (m[3].start_with?('"') || (m[3].include?(',') && m[3].include?('='))) - attrs = parse_attributes(sub_attributes(m[3].gsub('\]', ']')), []) - link_opts[:id] = (attrs.delete 'id') if attrs.has_key? 'id' - text = attrs[1] || '' - else - text = sub_attributes(m[3].gsub('\]', ']')) + attrs, link_opts = nil, { type: :link } + unless text.empty? + text = text.gsub ESC_R_SB, R_SB if text.include? R_SB + if !doc.compat_mode && (text.include? '=') + text = (attrs = (AttributeList.new text, self).parse)[1] || '' + link_opts[:id] = attrs['id'] end - # TODO enable in Asciidoctor 1.5.1 - # support pipe-separated text and title - #unless attrs && (attrs.has_key? 'title') - # if text.include? '|' - # attrs ||= {} - # text, attrs['title'] = text.split '|', 2 - # end - #end - if text.end_with? 
'^' text = text.chop if attrs attrs['window'] ||= '_blank' else - attrs = {'window' => '_blank'} + attrs = { 'window' => '_blank' } end end end if text.empty? - if @document.attr? 'hide-uri-scheme' - text = target.sub UriSniffRx, '' - else - text = target - end - + # NOTE it's not possible for the URI scheme to be bare in this case + text = (doc_attrs.key? 'hide-uri-scheme') ? (target.sub UriSniffRx, '') : target if attrs - attrs['role'] = %(bare #{attrs['role']}).chomp ' ' + attrs['role'] = (attrs.key? 'role') ? %(bare #{attrs['role']}) : 'bare' else - attrs = {'role' => 'bare'} + attrs = { 'role' => 'bare' } end end + doc.register :links, (link_opts[:target] = target) link_opts[:attributes] = attrs if attrs - %(#{prefix}#{Inline.new(self, :anchor, text, link_opts).convert}#{suffix}) - } + %(#{prefix}#{(Inline.new self, :anchor, text, link_opts).convert}#{suffix}) + end end - if found[:macroish] && (result.include? 'link:') || (result.include? 'mailto:') + if found_macroish && ((text.include? 'link:') || (text.include? 'ilto:')) # inline link macros, link:target[text] - result = result.gsub(LinkInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ + text = text.gsub InlineLinkMacroRx do # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] - end - raw_target = m[1] - mailto = m[0].start_with?('mailto:') - target = mailto ? %(mailto:#{raw_target}) : raw_target - - link_opts = { :type => :link, :target => target } - attrs = nil - #text = sub_attributes(m[2].gsub('\]', ']')) - text = if use_link_attrs && (m[2].start_with?('"') || m[2].include?(',')) - attrs = parse_attributes(sub_attributes(m[2].gsub('\]', ']')), []) - link_opts[:id] = (attrs.delete 'id') if attrs.key? 'id' + if $&.start_with? RS + next $&.slice 1, $&.length + elsif (mailto = $1) + target = 'mailto:' + (mailto_text = $2) + else + target = $2 + end + attrs, link_opts = nil, { type: :link } + unless (text = $3).empty? + text = text.gsub ESC_R_SB, R_SB if text.include? R_SB if mailto - if attrs.key? 2 - target = link_opts[:target] = "#{target}?subject=#{Helpers.encode_uri(attrs[2])}" - - if attrs.key? 3 - target = link_opts[:target] = "#{target}&body=#{Helpers.encode_uri(attrs[3])}" + if !doc.compat_mode && (text.include? ',') + text = (attrs = (AttributeList.new text, self).parse)[1] || '' + link_opts[:id] = attrs['id'] + if attrs.key? 2 + if attrs.key? 3 + target = %(#{target}?subject=#{Helpers.encode_uri_component attrs[2]}&body=#{Helpers.encode_uri_component attrs[3]}) + else + target = %(#{target}?subject=#{Helpers.encode_uri_component attrs[2]}) + end end end + elsif !doc.compat_mode && (text.include? '=') + text = (attrs = (AttributeList.new text, self).parse)[1] || '' + link_opts[:id] = attrs['id'] end - attrs[1] - else - sub_attributes(m[2].gsub('\]', ']')) - end - - # QUESTION should a mailto be registered as an e-mail address? - @document.register(:links, target) - - # TODO enable in Asciidoctor 1.5.1 - # support pipe-separated text and title - #unless attrs && (attrs.key? 'title') - # if text.include? '|' - # attrs ||= {} - # text, attrs['title'] = text.split '|', 2 - # end - #end - if text.end_with? '^' - text = text.chop - if attrs - attrs['window'] ||= '_blank' - else - attrs = {'window' => '_blank'} + if text.end_with? '^' + text = text.chop + if attrs + attrs['window'] ||= '_blank' + else + attrs = { 'window' => '_blank' } + end end end if text.empty? # mailto is a special case, already processed if mailto - text = raw_target + text = mailto_text else - if @document.attr? 
'hide-uri-scheme' - text = raw_target.sub UriSniffRx, '' + if doc_attrs.key? 'hide-uri-scheme' + if (text = target.sub UriSniffRx, '').empty? + text = target + end else - text = raw_target + text = target end - if attrs - attrs['role'] = %(bare #{attrs['role']}).chomp ' ' + attrs['role'] = (attrs.key? 'role') ? %(bare #{attrs['role']}) : 'bare' else - attrs = {'role' => 'bare'} + attrs = { 'role' => 'bare' } end end end + # QUESTION should a mailto be registered as an e-mail address? + doc.register :links, (link_opts[:target] = target) link_opts[:attributes] = attrs if attrs Inline.new(self, :anchor, text, link_opts).convert - } + end end - if result.include? '@' - result = result.gsub(EmailInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - address = m[0] - if (lead = m[1]) - case lead - when '\\' - next address[1..-1] - else - next address - end - end + if text.include? '@' + text = text.gsub InlineEmailRx do + # honor the escape + next $1 == RS ? ($&.slice 1, $&.length) : $& if $1 - target = %(mailto:#{address}) + target = 'mailto:' + (address = $&) # QUESTION should this be registered as an e-mail address? - @document.register(:links, target) + doc.register(:links, target) - Inline.new(self, :anchor, address, :type => :link, :target => target).convert - } + Inline.new(self, :anchor, address, type: :link, target: target).convert + end end - if found[:macroish_short_form] && result.include?('footnote') - result = result.gsub(FootnoteInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] - end - if m[1] == 'footnote' - id = nil - # REVIEW it's a dirty job, but somebody's gotta do it - text = restore_passthroughs(sub_inline_xrefs(sub_inline_anchors(normalize_string m[2], true)), false) - index = @document.counter('footnote-number') - @document.register(:footnotes, Document::Footnote.new(index, id, text)) - type = nil - target = nil - else - id, text = m[2].split(',', 2) - id = id.strip - # NOTE In Opal, text is set to empty string if comma is missing - if text.nil_or_empty? - if (footnote = @document.references[:footnotes].find {|fn| fn.id == id }) - index = footnote.index - text = footnote.text - else - index = nil - text = id - end - target = id - id = nil - type = :xref - else - # REVIEW it's a dirty job, but somebody's gotta do it - text = restore_passthroughs(sub_inline_xrefs(sub_inline_anchors(normalize_string text, true)), false) - index = @document.counter('footnote-number') - @document.register(:footnotes, Document::Footnote.new(index, id, text)) - type = :ref - target = nil - end - end - Inline.new(self, :footnote, text, :attributes => {'index' => index}, :id => id, :target => target, :type => type).convert - } + if found_square_bracket && @context == :list_item && @parent.style == 'bibliography' + text = text.sub(InlineBiblioAnchorRx) { (Inline.new self, :anchor, $2, type: :bibref, id: $1).convert } end - sub_inline_xrefs(sub_inline_anchors(result, found), found) - end - - # Internal: Substitute normal and bibliographic anchors - def sub_inline_anchors(text, found = nil) - if (!found || found[:square_bracket]) && text.include?('[[[') - text = text.gsub(InlineBiblioAnchorRx) { - # alias match for Ruby 1.8.7 compat - m = $~ + if (found_square_bracket && text.include?('[[')) || (found_macroish && text.include?('or:')) + text = text.gsub InlineAnchorRx do # honor the escape - if m[0].start_with? 
'\\' - next m[0][1..-1] - end - id = reftext = m[1] - Inline.new(self, :anchor, reftext, :type => :bibref, :target => id).convert - } - end + next $&.slice 1, $&.length if $1 - if ((!found || found[:square_bracket]) && text.include?('[[')) || - ((!found || found[:macroish]) && text.include?('anchor:')) - text = text.gsub(InlineAnchorRx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] + # NOTE reftext is only relevant for DocBook output; used as value of xreflabel attribute + if (id = $2) + reftext = $3 + else + id = $4 + if (reftext = $5) && (reftext.include? R_SB) + reftext = reftext.gsub ESC_R_SB, R_SB + end end - # fix non-matching group results in Opal under Firefox - if ::RUBY_ENGINE_OPAL - m[1] = nil if m[1] == '' - m[2] = nil if m[2] == '' - m[4] = nil if m[4] == '' - end - id = m[1] || m[3] - reftext = m[2] || m[4] || %([#{id}]) - # enable if we want to allow double quoted values - #id = id.sub(DoubleQuotedRx, '\2') - #if reftext - # reftext = reftext.sub(DoubleQuotedMultiRx, '\2') - #else - # reftext = "[#{id}]" - #end - Inline.new(self, :anchor, reftext, :type => :ref, :target => id).convert - } + Inline.new(self, :anchor, reftext, type: :ref, id: id).convert + end end - text - end - - # Internal: Substitute cross reference links - def sub_inline_xrefs(text, found = nil) - if (!found || found[:macroish]) || text.include?('<<') - text = text.gsub(XrefInlineMacroRx) { - # alias match for Ruby 1.8.7 compat - m = $~ + #if (text.include? ';&l') || (found_macroish && (text.include? 'xref:')) + if ((text.include? '&') && (text.include? ';&l')) || (found_macroish && (text.include? 'xref:')) + text = text.gsub InlineXrefMacroRx do # honor the escape - if m[0].start_with? '\\' - next m[0][1..-1] + next $&.slice 1, $&.length if $&.start_with? RS + + attrs = {} + if (refid = $1) + refid, text = refid.split ',', 2 + text = text.lstrip if text + else + macro = true + refid = $2 + if (text = $3) + text = text.gsub ESC_R_SB, R_SB if text.include? R_SB + # NOTE if an equal sign (=) is present, parse text as attributes + text = ((AttributeList.new text, self).parse_into attrs)[1] if !doc.compat_mode && (text.include? '=') + end end - # fix non-matching group results in Opal under Firefox - if ::RUBY_ENGINE_OPAL - m[1] = nil if m[1] == '' - end - if m[1] - id, reftext = m[1].split(',', 2).map {|it| it.strip } - id = id.sub(DoubleQuotedRx, '\2') - # NOTE In Opal, reftext is set to empty string if comma is missing - reftext = if reftext.nil_or_empty? - nil + + if doc.compat_mode + fragment = refid + elsif (hash_idx = refid.index '#') + if hash_idx > 0 + if (fragment_len = refid.length - 1 - hash_idx) > 0 + path, fragment = (refid.slice 0, hash_idx), (refid.slice hash_idx + 1, fragment_len) + else + path = refid.chop + end + if macro + if path.end_with? '.adoc' + src2src = path = path.slice 0, path.length - 5 + elsif !(Helpers.extname? path) + src2src = path + end + elsif path.end_with?(*ASCIIDOC_EXTENSIONS.keys) + src2src = path = path.slice 0, (path.rindex '.') + else + src2src = path + end + else + target, fragment = refid, (refid.slice 1, refid.length) + end + elsif macro + if refid.end_with? '.adoc' + src2src = path = refid.slice 0, refid.length - 5 + elsif Helpers.extname? refid + path = refid else - reftext.sub(DoubleQuotedMultiRx, '\2') + fragment = refid end else - id = m[2] - reftext = m[3] unless m[3].nil_or_empty? + fragment = refid end - if id.include? 
'#' - path, fragment = id.split('#') - # QUESTION perform this check and throw it back if it fails? - #elsif (start_chr = id.chr) == '.' || start_chr == '/' - # next m[0][1..-1] + # handles: #id + if target + refid = fragment + logger.info %(possible invalid reference: #{refid}) if logger.info? && !doc.catalog[:refs][refid] + elsif path + # handles: path#, path#id, path.adoc#, path.adoc#id, or path.adoc (xref macro only) + # the referenced path is the current document, or its contents have been included in the current document + if src2src && (doc.attributes['docname'] == path || doc.catalog[:includes][path]) + if fragment + refid, path, target = fragment, nil, %(##{fragment}) + logger.info %(possible invalid reference: #{refid}) if logger.info? && !doc.catalog[:refs][refid] + else + refid, path, target = nil, nil, '#' + end + else + refid, path = path, %(#{doc.attributes['relfileprefix']}#{path}#{src2src ? (doc.attributes.fetch 'relfilesuffix', doc.outfilesuffix) : ''}) + if fragment + refid, target = %(#{refid}##{fragment}), %(#{path}##{fragment}) + else + target = path + end + end + # handles: id (in compat mode or when natural xrefs are disabled) + elsif doc.compat_mode || !Compliance.natural_xrefs + refid, target = fragment, %(##{fragment}) + logger.info %(possible invalid reference: #{refid}) if logger.info? && doc.catalog[:refs][refid] + # handles: id + elsif doc.catalog[:refs][fragment] + refid, target = fragment, %(##{fragment}) + # handles: Node Title or Reference Text + # do reverse lookup on fragment if not a known ID and resembles reftext (contains a space or uppercase char) + elsif ((fragment.include? ' ') || fragment.downcase != fragment) && (refid = doc.resolve_id fragment) + fragment, target = refid, %(##{refid}) + else + refid, target = fragment, %(##{fragment}) + logger.info %(possible invalid reference: #{refid}) if logger.info? + end + attrs['path'] = path + attrs['fragment'] = fragment + attrs['refid'] = refid + Inline.new(self, :anchor, text, type: :xref, target: target, attributes: attrs).convert + end + end + + if found_macroish && (text.include? 'tnote') + text = text.gsub InlineFootnoteMacroRx do + # honor the escape + next $&.slice 1, $&.length if $&.start_with? RS + + # footnoteref + if $1 + if $3 + id, text = $3.split ',', 2 + logger.warn %(found deprecated footnoteref macro: #{$&}; use footnote macro with target instead) unless doc.compat_mode + else + next $& + end + # footnote else - path = nil - fragment = id + id = $2 + text = $3 end - # handles forms: doc#, doc.adoc#, doc#id and doc.adoc#id - if path - path = Helpers.rootname(path) - # the referenced path is this document, or its contents has been included in this document - if @document.attributes['docname'] == path || @document.references[:includes].include?(path) - refid = fragment - path = nil - target = %(##{fragment}) + if id + if text + text = restore_passthroughs(normalize_text text, true, true) + index = doc.counter('footnote-number') + doc.register(:footnotes, Document::Footnote.new(index, id, text)) + type, target = :ref, nil else - refid = fragment ? %(#{path}##{fragment}) : path - path = "#{@document.attributes['relfileprefix']}#{path}#{@document.attributes.fetch 'outfilesuffix', '.html'}" - target = fragment ? %(#{path}##{fragment}) : path - end - # handles form: id or Section Title - else - # resolve fragment as reftext if cannot be resolved as refid and looks like reftext - if !(@document.references[:ids].has_key? fragment) && - ((fragment.include? 
' ') || fragment.downcase != fragment) && - (resolved_id = RUBY_MIN_VERSION_1_9 ? (@document.references[:ids].key fragment) : (@document.references[:ids].index fragment)) - fragment = resolved_id + if (footnote = doc.footnotes.find {|candidate| candidate.id == id }) + index, text = footnote.index, footnote.text + else + logger.warn %(invalid footnote reference: #{id}) + index, text = nil, id + end + type, target, id = :xref, id, nil end - refid = fragment - target = %(##{fragment}) + elsif text + text = restore_passthroughs(normalize_text text, true, true) + index = doc.counter('footnote-number') + doc.register(:footnotes, Document::Footnote.new(index, id, text)) + type = target = nil + else + next $& end - Inline.new(self, :anchor, reftext, :type => :xref, :target => target, :attributes => {'path' => path, 'fragment' => fragment, 'refid' => refid}).convert - } + Inline.new(self, :footnote, text, attributes: { 'index' => index }, id: id, target: target, type: type).convert + end end text end - # Public: Substitute callout source references - # - # text - The String text to process - # - # Returns the converted String text - def sub_callouts(text) - # FIXME cache this dynamic regex - callout_rx = (attr? 'line-comment') ? /(?:#{::Regexp.escape(attr 'line-comment')} )?#{CalloutSourceRxt}/ : CalloutSourceRx - text.gsub(callout_rx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[1] == '\\' - # we have to do a sub since we aren't sure it's the first char - next m[0].sub('\\', '') - end - Inline.new(self, :callout, m[3], :id => @document.callouts.read_next_id).convert - } - end - # Public: Substitute post replacements # # text - The String text to process # # Returns the converted String text - def sub_post_replacements(text) - if (@document.attributes.has_key? 'hardbreaks') || (@attributes.has_key? 'hardbreaks-option') - lines = (text.split EOL) - return text if lines.size == 1 + def sub_post_replacements text + #if attr? 'hardbreaks-option', nil, true + if @attributes['hardbreaks-option'] || @document.attributes['hardbreaks-option'] + lines = text.split LF, -1 + return text if lines.size < 2 last = lines.pop - lines.map {|line| Inline.new(self, :break, line.rstrip.chomp(LINE_BREAK), :type => :line).convert }.push(last) * EOL - elsif text.include? '+' - text.gsub(LineBreakRx) { Inline.new(self, :break, $~[1], :type => :line).convert } + (lines.map do |line| + Inline.new(self, :break, (line.end_with? HARD_LINE_BREAK) ? (line.slice 0, line.length - 2) : line, type: :line).convert + end << last).join LF + elsif (text.include? PLUS) && (text.include? HARD_LINE_BREAK) + text.gsub(HardLineBreakRx) { Inline.new(self, :break, $1, type: :line).convert } else text end end - # Internal: Convert a quoted text region + # Public: Apply verbatim substitutions on source (for use when highlighting is disabled). # - # match - The MatchData for the quoted text region - # type - The quoting type (single, double, strong, emphasis, monospaced, etc) - # scope - The scope of the quoting (constrained or unconstrained) + # source - the source code String on which to apply verbatim substitutions + # process_callouts - a Boolean flag indicating whether callout marks should be substituted # - # Returns The converted String text for the quoted text region - def convert_quoted_text(match, type, scope) - unescaped_attrs = nil - if match[0].start_with? '\\' - if scope == :constrained && !(attrs = match[2]).nil_or_empty? 
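The footnote macro handling added earlier in this hunk can be seen from the authoring side; a brief sketch, where giving the macro a target id lets a later empty footnote reuse the same note (replacing the deprecated footnoteref form):

    require 'asciidoctor'

    Asciidoctor.convert <<~'ADOC', safe: :safe
    The first claim.footnote:disclaimer[Opinions my own.]

    A later claim reuses the same note.footnote:disclaimer[]
    ADOC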
- unescaped_attrs = %([#{attrs}]) + # Returns the substituted source + def sub_source source, process_callouts + process_callouts ? sub_callouts(sub_specialchars source) : (sub_specialchars source) + end + + # Public: Substitute callout source references + # + # text - The String text to process + # + # Returns the converted String text + def sub_callouts text + callout_rx = (attr? 'line-comment') ? CalloutSourceRxMap[attr 'line-comment'] : CalloutSourceRx + autonum = 0 + text.gsub callout_rx do + # honor the escape + if $2 + # use sub since it might be behind a line comment + $&.sub RS, '' else - return match[0][1..-1] + Inline.new(self, :callout, $4 == '.' ? (autonum += 1).to_s : $4, id: @document.callouts.read_next_id, attributes: { 'guard' => $1 }).convert end end + end - if scope == :constrained - if unescaped_attrs - %(#{unescaped_attrs}#{Inline.new(self, :quoted, match[3], :type => type).convert}) - else - if (attributes = parse_quoted_text_attributes(match[2])) - id = attributes.delete 'id' - type = :unquoted if type == :mark + # Public: Highlight (i.e., colorize) the source code during conversion using a syntax highlighter, if activated by the + # source-highlighter document attribute. Otherwise return the text with verbatim substitutions applied. + # + # If the process_callouts argument is true, this method will extract the callout marks from the source before passing + # it to the syntax highlighter, then subsequently restore those callout marks to the highlighted source so the callout + # marks don't confuse the syntax highlighter. + # + # source - the source code String to syntax highlight + # process_callouts - a Boolean flag indicating whether callout marks should be located and substituted + # + # Returns the highlighted source code, if a syntax highlighter is defined on the document, otherwise the source with + # verbatim substituions applied + def highlight_source source, process_callouts + # NOTE the call to highlight? is a defensive check since, normally, we wouldn't arrive here unless it returns true + return sub_source source, process_callouts unless (syntax_hl = @document.syntax_highlighter) && syntax_hl.highlight? + + source, callout_marks = extract_callouts source if process_callouts + + doc_attrs = @document.attributes + syntax_hl_name = syntax_hl.name + if (linenums_mode = (attr? 'linenums') ? (doc_attrs[%(#{syntax_hl_name}-linenums-mode)] || :table).to_sym : nil) + start_line_number = 1 if (start_line_number = (attr 'start', 1).to_i) < 1 + end + highlight_lines = resolve_lines_to_highlight source, (attr 'highlight') if attr? 'highlight' + + highlighted, source_offset = syntax_hl.highlight self, source, (attr 'language'), + callouts: callout_marks, + css_mode: (doc_attrs[%(#{syntax_hl_name}-css)] || :class).to_sym, + highlight_lines: highlight_lines, + number_lines: linenums_mode, + start_line_number: start_line_number, + style: doc_attrs[%(#{syntax_hl_name}-style)] + + # fix passthrough placeholders that got caught up in syntax highlighting + highlighted = highlighted.gsub HighlightedPassSlotRx, %(#{PASS_START}\\1#{PASS_END}) unless @passthroughs.empty? + + # NOTE highlight method may have depleted callouts + callout_marks.nil_or_empty? ? highlighted : (restore_callouts highlighted, callout_marks, source_offset) + end + + # Public: Resolve the line numbers in the specified source to highlight from the provided spec. + # + # e.g., highlight="1-5, !2, 10" or highlight=1-5;!2,10 + # + # source - The String source. 
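This spec format is what the highlight block attribute accepts; a hedged sketch, assuming the rouge gem is available to act as the syntax highlighter:

    require 'asciidoctor'

    adoc = <<~'ADOC'
    [source,ruby,linenums,highlight="1-3;!2"]
    ----
    a = 1
    b = 2
    c = 3
    ----
    ADOC
    # requires the rouge gem to be installed
    Asciidoctor.convert adoc, safe: :safe, attributes: { 'source-highlighter' => 'rouge' }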
+ # spec - The lines specifier (e.g., "1-5, !2, 10" or "1..5;!2;10") + # + # Returns an [Array] of unique, sorted line numbers. + def resolve_lines_to_highlight source, spec + lines = [] + spec = spec.delete ' ' if spec.include? ' ' + ((spec.include? ',') ? (spec.split ',') : (spec.split ';')).map do |entry| + if entry.start_with? '!' + entry = entry.slice 1, entry.length + negate = true + end + if (delim = (entry.include? '..') ? '..' : ((entry.include? '-') ? '-' : nil)) + from, delim, to = entry.partition delim + to = (source.count LF) + 1 if to.empty? || (to = to.to_i) < 0 + line_nums = (from.to_i..to).to_a + if negate + lines -= line_nums else - id = nil + lines.concat line_nums end - %(#{match[1]}#{Inline.new(self, :quoted, match[3], :type => type, :id => id, :attributes => attributes).convert}) - end - else - if (attributes = parse_quoted_text_attributes(match[1])) - id = attributes.delete 'id' - type = :unquoted if type == :mark else - id = nil + if negate + lines.delete entry.to_i + else + lines << entry.to_i + end end - Inline.new(self, :quoted, match[2], :type => type, :id => id, :attributes => attributes).convert end + lines.sort.uniq end - # Internal: Parse the attributes that are defined on quoted text + # Public: Extract the passthrough text from the document for reinsertion after processing. # - # str - A String of unprocessed attributes (space-separated roles or the id/role shorthand syntax) + # text - The String from which to extract passthrough fragements # - # returns nil if str is nil, an empty Hash if str is empty, otherwise a Hash of attributes (role and id only) - def parse_quoted_text_attributes(str) - return unless str - return {} if str.empty? - str = sub_attributes(str) if str.include?('{') - str = str.strip - # for compliance, only consider first positional attribute - str, _ = str.split(',', 2) if str.include?(',') + # Returns the String text with passthrough regions substituted with placeholders + def extract_passthroughs text + compat_mode = @document.compat_mode + passthrus = @passthroughs + text = text.gsub InlinePassMacroRx do + if (boundary = $4) # $$, ++, or +++ + # skip ++ in compat mode, handled as normal quoted text + next %(#{$2 ? "#{$1}[#{$2}]#{$3}" : "#{$1}#{$3}"}++#{extract_passthroughs $5}++) if compat_mode && boundary == '++' - if str.empty? - {} - elsif (str.start_with?('.') || str.start_with?('#')) && Compliance.shorthand_property_syntax - segments = str.split('#', 2) + if (attrlist = $2) + if (escape_count = $3.length) > 0 + # NOTE we don't look for nested unconstrained pass macros + next %(#{$1}[#{attrlist}]#{RS * (escape_count - 1)}#{boundary}#{$5}#{boundary}) + elsif $1 == RS + preceding = %([#{attrlist}]) + else + if boundary == '++' && (attrlist.end_with? 'x-') + old_behavior = true + attrlist = attrlist.slice 0, attrlist.length - 2 + end + attributes = parse_quoted_text_attributes attrlist + end + elsif (escape_count = $3.length) > 0 + # NOTE we don't look for nested unconstrained pass macros + next %(#{RS * (escape_count - 1)}#{boundary}#{$5}#{boundary}) + end + subs = (boundary == '+++' ? 
[] : BASIC_SUBS) - if segments.length > 1 - id, *more_roles = segments[1].split('.') - else - id = nil - more_roles = [] + if attributes + if old_behavior + passthrus[passthru_key = passthrus.size] = { text: $5, subs: NORMAL_SUBS, type: :monospaced, attributes: attributes } + else + passthrus[passthru_key = passthrus.size] = { text: $5, subs: subs, type: :unquoted, attributes: attributes } + end + else + passthrus[passthru_key = passthrus.size] = { text: $5, subs: subs } + end + else # pass:[] + # NOTE we don't look for nested pass:[] macros + # honor the escape + next $&.slice 1, $&.length if $6 == RS + if (subs = $7) + passthrus[passthru_key = passthrus.size] = { text: (normalize_text $8, nil, true), subs: (resolve_pass_subs subs) } + else + passthrus[passthru_key = passthrus.size] = { text: (normalize_text $8, nil, true) } + end end - roles = segments[0].empty? ? [] : segments[0].split('.') - if roles.length > 1 - roles.shift - end + %(#{preceding || ''}#{PASS_START}#{passthru_key}#{PASS_END}) + end if (text.include? '++') || (text.include? '$$') || (text.include? 'ss:') - if more_roles.length > 0 - roles.concat more_roles + pass_inline_char1, pass_inline_char2, pass_inline_rx = InlinePassRx[compat_mode] + text = text.gsub pass_inline_rx do + preceding = $1 + attrlist = $2 + escape_mark = RS if (quoted_text = $3).start_with? RS + format_mark = $4 + content = $5 + + if compat_mode + old_behavior = true + elsif (old_behavior = attrlist && (attrlist.end_with? 'x-')) + attrlist = attrlist.slice 0, attrlist.length - 2 end - attrs = {} - attrs['id'] = id if id - attrs['role'] = roles * ' ' unless roles.empty? - attrs - else - {'role' => str} - end - end + if attrlist + if format_mark == '`' && !old_behavior + next extract_inner_passthrough content, %(#{preceding}[#{attrlist}]#{escape_mark}) + elsif escape_mark + # honor the escape of the formatting mark + next %(#{preceding}[#{attrlist}]#{quoted_text.slice 1, quoted_text.length}) + elsif preceding == RS + # honor the escape of the attributes + preceding = %([#{attrlist}]) + else + attributes = parse_quoted_text_attributes attrlist + end + elsif format_mark == '`' && !old_behavior + next extract_inner_passthrough content, %(#{preceding}#{escape_mark}) + elsif escape_mark + # honor the escape of the formatting mark + next %(#{preceding}#{quoted_text.slice 1, quoted_text.length}) + end - # Internal: Parse the attributes in the attribute line - # - # attrline - A String of unprocessed attributes (key/value pairs) - # posattrs - The keys for positional attributes - # - # returns nil if attrline is nil, an empty Hash if attrline is empty, otherwise a Hash of parsed attributes - def parse_attributes(attrline, posattrs = ['role'], opts = {}) - return unless attrline - return {} if attrline.empty? - attrline = @document.sub_attributes(attrline) if opts[:sub_input] - attrline = unescape_bracketed_text(attrline) if opts[:unescape_input] - block = nil - if opts.fetch(:sub_result, true) - # substitutions are only performed on attribute values if block is not nil - block = self - end + if compat_mode + passthrus[passthru_key = passthrus.size] = { text: content, subs: BASIC_SUBS, attributes: attributes, type: :monospaced } + elsif attributes + if old_behavior + subs = (format_mark == '`' ? 
BASIC_SUBS : NORMAL_SUBS) + passthrus[passthru_key = passthrus.size] = { text: content, subs: subs, attributes: attributes, type: :monospaced } + else + passthrus[passthru_key = passthrus.size] = { text: content, subs: BASIC_SUBS, attributes: attributes, type: :unquoted } + end + else + passthrus[passthru_key = passthrus.size] = { text: content, subs: BASIC_SUBS } + end - if (into = opts[:into]) - AttributeList.new(attrline, block).parse_into(into, posattrs) - else - AttributeList.new(attrline, block).parse(posattrs) - end - end + %(#{preceding}#{PASS_START}#{passthru_key}#{PASS_END}) + end if (text.include? pass_inline_char1) || (pass_inline_char2 && (text.include? pass_inline_char2)) - # Internal: Strip bounding whitespace, fold endlines and unescaped closing - # square brackets from text extracted from brackets - def unescape_bracketed_text(text) - return '' if text.empty? - # FIXME make \] a regex - text.strip.tr(EOL, ' ').gsub('\]', ']') - end + # NOTE we need to do the stem in a subsequent step to allow it to be escaped by the former + text = text.gsub InlineStemMacroRx do + # honor the escape + next $&.slice 1, $&.length if $&.start_with? RS - # Internal: Strip bounding whitespace and fold endlines - def normalize_string str, unescape_brackets = false - if str.empty? - '' - elsif unescape_brackets - unescape_brackets str.strip.tr(EOL, ' ') - else - str.strip.tr(EOL, ' ') - end - end + if (type = $1.to_sym) == :stem + type = STEM_TYPE_ALIASES[@document.attributes['stem']].to_sym + end + subs = $2 + content = normalize_text $3, nil, true + # NOTE drop enclosing $ signs around latexmath for backwards compatibility with AsciiDoc Python + content = content.slice 1, content.length - 2 if type == :latexmath && (content.start_with? '$') && (content.end_with? '$') + subs = subs ? (resolve_pass_subs subs) : ((@document.basebackend? 'html') ? BASIC_SUBS : nil) + passthrus[passthru_key = passthrus.size] = { text: content, subs: subs, type: type } + %(#{PASS_START}#{passthru_key}#{PASS_END}) + end if (text.include? ':') && ((text.include? 'stem:') || (text.include? 'math:')) - # Internal: Unescape closing square brackets. - # Intended for text extracted from square brackets. - def unescape_brackets str - # FIXME make \] a regex - str.empty? ? '' : str.gsub('\]', ']') + text end - # Internal: Split text formatted as CSV with support - # for double-quoted values (in which commas are ignored) - def split_simple_csv str - if str.empty? - values = [] - elsif str.include? '"' - values = [] - current = [] - quote_open = false - str.each_char do |c| - case c - when ',' - if quote_open - current.push c - else - values << current.join.strip - current = [] + # Public: Restore the passthrough text by reinserting into the placeholder positions + # + # text - The String text into which to restore the passthrough text + # + # returns The String text with the passthrough text restored + def restore_passthroughs text + passthrus = @passthroughs + text.gsub PassSlotRx do + if (pass = passthrus[$1.to_i]) + subbed_text = apply_subs(pass[:text], pass[:subs]) + if (type = pass[:type]) + if (attributes = pass[:attributes]) + id = attributes['id'] end - when '"' - quote_open = !quote_open - else - current.push c + subbed_text = Inline.new(self, :quoted, subbed_text, type: type, id: id, attributes: attributes).convert end + subbed_text.include?(PASS_START) ? restore_passthroughs(subbed_text) : subbed_text + else + logger.error %(unresolved passthrough detected: #{text}) + '??pass??' 
end - - values << current.join.strip - else - values = str.split(',').map {|it| it.strip } end - - values end - # Internal: Resolve the list of comma-delimited subs against the possible options. + # Public: Resolve the list of comma-delimited subs against the possible options. # - # subs - A comma-delimited String of substitution aliases + # subs - The comma-delimited String of substitution names or aliases. + # type - A Symbol representing the context for which the subs are being resolved (default: :block). + # defaults - An Array of substitutions to start with when computing incremental substitutions (default: nil). + # subject - The String to use in log messages to communicate the subject for which subs are being resolved (default: nil) # - # returns An Array of Symbols representing the substitution operation + # Returns An Array of Symbols representing the substitution operation or nothing if no subs are found. def resolve_subs subs, type = :block, defaults = nil, subject = nil - return [] if subs.nil_or_empty? + return if subs.nil_or_empty? + # QUESTION should we store candidates as a Set instead of an Array? candidates = nil - modifiers_present = SubModifierSniffRx =~ subs - subs.tr(' ', '').split(',').each do |key| + subs = subs.delete ' ' if subs.include? ' ' + modifiers_present = SubModifierSniffRx.match? subs + subs.split(',').each do |key| modifier_operation = nil if modifiers_present if (first = key.chr) == '+' modifier_operation = :append - key = key[1..-1] + key = key.slice 1, key.length elsif first == '-' modifier_operation = :remove - key = key[1..-1] + key = key.slice 1, key.length elsif key.end_with? '+' modifier_operation = :prepend key = key.chop @@ -1336,12 +1177,12 @@ key = key.to_sym # special case to disable callouts for inline subs if type == :inline && (key == :verbatim || key == :v) - resolved_keys = [:specialcharacters] - elsif COMPOSITE_SUBS.key? key - resolved_keys = COMPOSITE_SUBS[key] - elsif type == :inline && key.length == 1 && (SUB_SYMBOLS.key? key) - resolved_key = SUB_SYMBOLS[key] - if (candidate = COMPOSITE_SUBS[resolved_key]) + resolved_keys = BASIC_SUBS + elsif SUB_GROUPS.key? key + resolved_keys = SUB_GROUPS[key] + elsif type == :inline && key.length == 1 && (SUB_HINTS.key? key) + resolved_key = SUB_HINTS[key] + if (candidate = SUB_GROUPS[resolved_key]) resolved_keys = candidate else resolved_keys = [resolved_key] @@ -1351,7 +1192,7 @@ end if modifier_operation - candidates ||= (defaults ? defaults.dup : []) + candidates ||= (defaults ? (defaults.drop 0) : []) case modifier_operation when :append candidates += resolved_keys @@ -1365,265 +1206,327 @@ candidates += resolved_keys end end - # weed out invalid options and remove duplicates (first wins) - # TODO may be use a set instead? + return unless candidates + # weed out invalid options and remove duplicates (order is preserved; first occurence wins) resolved = candidates & SUB_OPTIONS[type] unless (candidates - resolved).empty? invalid = candidates - resolved - warn %(asciidoctor: WARNING: invalid substitution type#{invalid.size > 1 ? 's' : ''}#{subject ? ' for ' : nil}#{subject}: #{invalid * ', '}) + logger.warn %(invalid substitution type#{invalid.size > 1 ? 's' : ''}#{subject ? ' for ' : ''}#{subject}: #{invalid.join ', '}) end resolved end + # Public: Call resolve_subs for the :block type. def resolve_block_subs subs, defaults, subject resolve_subs subs, :block, defaults, subject end + # Public: Call resolve_subs for the :inline type with the subject set as passthrough macro. 
def resolve_pass_subs subs resolve_subs subs, :inline, nil, 'passthrough macro' end - # Public: Highlight the source code if a source highlighter is defined - # on the document, otherwise return the text unprocessed - # - # Callout marks are stripped from the source prior to passing it to the - # highlighter, then later restored in converted form, so they are not - # incorrectly processed by the source highlighter. + # Public: Expand all groups in the subs list and return. If no subs are resolve, return nil. # - # source - the source code String to highlight - # process_callouts - a Boolean flag indicating whether callout marks should be substituted + # subs - The substitutions to expand; can be a Symbol, Symbol Array or nil # - # returns the highlighted source code, if a source highlighter is defined - # on the document, otherwise the source with verbatim substituions applied - def highlight_source source, process_callouts, highlighter = nil - case (highlighter ||= @document.attributes['source-highlighter']) - when 'coderay' - unless (highlighter_loaded = defined? ::CodeRay) || @document.attributes['coderay-unavailable'] - if (Helpers.require_library 'coderay', true, :warn).nil? - # prevent further attempts to load CodeRay - @document.set_attr 'coderay-unavailable', '' - else - highlighter_loaded = true - end - end - when 'pygments' - unless (highlighter_loaded = defined? ::Pygments) || @document.attributes['pygments-unavailable'] - if (Helpers.require_library 'pygments', 'pygments.rb', :warn).nil? - # prevent further attempts to load Pygments - @document.set_attr 'pygments-unavailable', '' - else - highlighter_loaded = true - end + # Returns a Symbol Array of substitutions to pass to apply_subs or nil if no substitutions were resolved. + def expand_subs subs + if ::Symbol === subs + unless subs == :none + SUB_GROUPS[subs] || [subs] end else - # unknown highlighting library (something is misconfigured if we arrive here) - highlighter_loaded = false + expanded_subs = [] + subs.each do |key| + unless key == :none + if (sub_group = SUB_GROUPS[key]) + expanded_subs += sub_group + else + expanded_subs << key + end + end + end + + expanded_subs.empty? ? nil : expanded_subs + end + end + + # Internal: Commit the requested substitutions to this block. + # + # Looks for an attribute named "subs". If present, resolves substitutions + # from the value of that attribute and assigns them to the subs property on + # this block. Otherwise, uses the substitutions assigned to the default_subs + # property, if specified, or selects a default set of substitutions based on + # the content model of the block. + # + # Returns nothing + def commit_subs + unless (default_subs = @default_subs) + case @content_model + when :simple + default_subs = NORMAL_SUBS + when :verbatim + # NOTE :literal with listparagraph-option gets folded into text of list item later + default_subs = @context == :verse ? NORMAL_SUBS : VERBATIM_SUBS + when :raw + # TODO make pass subs a compliance setting; AsciiDoc Python performs :attributes and :macros on a pass block + default_subs = @context == :stem ? BASIC_SUBS : NO_SUBS + else + return @subs + end end - return sub_source source, process_callouts unless highlighter_loaded + if (custom_subs = @attributes['subs']) + @subs = (resolve_block_subs custom_subs, default_subs, @context) || [] + else + @subs = default_subs.drop 0 + end - lineno = 0 - callout_on_last = false - if process_callouts - callout_marks = {} - last = -1 - # FIXME cache this dynamic regex - callout_rx = (attr? 
'line-comment') ? /(?:#{::Regexp.escape(attr 'line-comment')} )?#{CalloutExtractRxt}/ : CalloutExtractRx - # extract callout marks, indexed by line number - source = source.split(EOL).map {|line| - lineno = lineno + 1 - line.gsub(callout_rx) { - # alias match for Ruby 1.8.7 compat - m = $~ - # honor the escape - if m[1] == '\\' - m[0].sub('\\', '') - else - (callout_marks[lineno] ||= []) << m[3] - last = lineno - nil - end - } - } * EOL - callout_on_last = (last == lineno) - callout_marks = nil if callout_marks.empty? + # QUESION delegate this logic to a method? + if @context == :listing && @style == 'source' && (syntax_hl = @document.syntax_highlighter) && + syntax_hl.highlight? && (idx = @subs.index :specialcharacters) + @subs[idx] = :highlight + end + + nil + end + + # Internal: Parse attributes in name or name=value format from a comma-separated String + # + # attrlist - A comma-separated String list of attributes in name or name=value format. + # posattrs - An Array of positional attribute names (default: []). + # opts - A Hash of options to control how the string is parsed (default: {}): + # :into - The Hash to parse the attributes into (optional, default: false). + # :sub_input - A Boolean that indicates whether to substitute attributes prior to + # parsing (optional, default: false). + # :sub_result - A Boolean that indicates whether to apply substitutions + # single-quoted attribute values (optional, default: true). + # :unescape_input - A Boolean that indicates whether to unescape square brackets prior + # to parsing (optional, default: false). + # + # Returns an empty Hash if attrlist is nil or empty, otherwise a Hash of parsed attributes. + def parse_attributes attrlist, posattrs = [], opts = {} + return {} if attrlist ? attrlist.empty? : true + attrlist = normalize_text attrlist, true, true if opts[:unescape_input] + attrlist = @document.sub_attributes attrlist if opts[:sub_input] && (attrlist.include? ATTR_REF_HEAD) + # substitutions are only performed on attribute values if block is not nil + block = self if opts[:sub_result] + if (into = opts[:into]) + AttributeList.new(attrlist, block).parse_into(into, posattrs) else - callout_marks = nil + AttributeList.new(attrlist, block).parse(posattrs) end + end - linenums_mode = nil - highlight_lines = nil + private - case highlighter - when 'coderay' - if (linenums_mode = (attr? 'linenums') ? (@document.attributes['coderay-linenums-mode'] || :table).to_sym : nil) - if attr? 'highlight', nil, false - highlight_lines = resolve_highlight_lines(attr 'highlight', nil, false) - end - end - result = ::CodeRay::Duo[attr('language', :text, false).to_sym, :html, { - :css => (@document.attributes['coderay-css'] || :class).to_sym, - :line_numbers => linenums_mode, - :line_number_anchors => false, - :highlight_lines => highlight_lines, - :bold_every => false}].highlight source - when 'pygments' - lexer = ::Pygments::Lexer[attr('language', nil, false)] || ::Pygments::Lexer['text'] - opts = { :cssclass => 'pyhl', :classprefix => 'tok-', :nobackground => true } - unless (@document.attributes['pygments-css'] || 'class') == 'class' - opts[:noclasses] = true - opts[:style] = (@document.attributes['pygments-style'] || Stylesheets::DEFAULT_PYGMENTS_STYLE) - end - if attr? 'highlight', nil, false - unless (highlight_lines = resolve_highlight_lines(attr 'highlight', nil, false)).empty? - opts[:hl_lines] = highlight_lines * ' ' - end - end - if attr? 
'linenums' - # TODO we could add the line numbers in ourselves instead of having to strip out the junk - # FIXME move these regular expressions into constants - if (opts[:linenos] = @document.attributes['pygments-linenums-mode'] || 'table') == 'table' - linenums_mode = :table - # NOTE these subs clean out HTML that messes up our styles - result = lexer.highlight(source, :options => opts). - sub(/<div class="pyhl">
(.*)<\/div>/m, '\1'). - gsub(/<pre[^>]*>(.*?)<\/pre>\s*/m, '\1') - else - result = lexer.highlight(source, :options => opts). - sub(/<div class="pyhl"><pre[^>
    ]*>(.*?)<\/pre><\/div>/m, '\1') + # Internal: Extract the callout numbers from the source to prepare it for syntax highlighting. + def extract_callouts source + callout_marks = {} + lineno = 0 + last_lineno = nil + callout_rx = (attr? 'line-comment') ? CalloutExtractRxMap[attr 'line-comment'] : CalloutExtractRx + # extract callout marks, indexed by line number + source = (source.split LF, -1).map do |line| + lineno += 1 + line.gsub callout_rx do + # honor the escape + if $2 + # use sub since it might be behind a line comment + $&.sub RS, '' + else + (callout_marks[lineno] ||= []) << [$1, $4] + last_lineno = lineno + '' end - else - # nowrap gives us just the highlighted source; won't work when we need linenums though - opts[:nowrap] = true - result = lexer.highlight(source, :options => opts) end + end.join LF + if last_lineno + source = %(#{source}#{LF}) if last_lineno == lineno + else + callout_marks = nil end + [source, callout_marks] + end - # fix passthrough placeholders that got caught up in syntax highlighting - unless @passthroughs.empty? - result = result.gsub PASS_MATCH_HI, %(#{PASS_START}\\1#{PASS_END}) + # Internal: Restore the callout numbers to the highlighted source. + def restore_callouts source, callout_marks, source_offset = nil + if source_offset + preamble = source.slice 0, source_offset + source = source.slice source_offset, source.length + else + preamble = '' end - - if process_callouts && callout_marks - lineno = 0 - reached_code = linenums_mode != :table - result.split(EOL).map {|line| - unless reached_code - unless line.include?('') - next line - end - reached_code = true - end - lineno = lineno + 1 - if (conums = callout_marks.delete(lineno)) - tail = nil - if callout_on_last && callout_marks.empty? - # QUESTION when does this happen? - if (pos = line.index '') - tail = line[pos..-1] - line = %(#{line[0...pos].chomp ' '} ) - else - # Give conum on final line breathing room if trailing space in source is dropped - line = %(#{line.chomp ' '} ) - end - end - if conums.size == 1 - %(#{line}#{Inline.new(self, :callout, conums[0], :id => @document.callouts.read_next_id).convert }#{tail}) - else - conums_markup = conums.map {|conum| Inline.new(self, :callout, conum, :id => @document.callouts.read_next_id).convert } * ' ' - %(#{line}#{conums_markup}#{tail}) - end - else - line + autonum = lineno = 0 + preamble + ((source.split LF, -1).map do |line| + if (conums = callout_marks.delete lineno += 1) + if conums.size == 1 + guard, conum = conums[0] + %(#{line}#{Inline.new(self, :callout, conum == '.' ? (autonum += 1).to_s : conum, id: @document.callouts.read_next_id, attributes: { 'guard' => guard }).convert}) + else + %(#{line}#{conums.map do |guard_it, conum_it| + Inline.new(self, :callout, conum_it == '.' ? (autonum += 1).to_s : conum_it, id: @document.callouts.read_next_id, attributes: { 'guard' => guard_it }).convert + end.join ' '}) end - } * EOL + else + line + end + end.join LF) + end + + # Internal: Extract nested single-plus passthrough; otherwise return unprocessed + def extract_inner_passthrough text, pre + if (text.end_with? '+') && (text.start_with? 
'+', '\+') && SinglePlusInlinePassRx =~ text + if $1 + %(#{pre}`+#{$2}+`) + else + @passthroughs[passthru_key = @passthroughs.size] = { text: $2, subs: BASIC_SUBS } + %(#{pre}`#{PASS_START}#{passthru_key}#{PASS_END}`) + end else - result + %(#{pre}`#{text}`) end end - # e.g., highlight="1-5, !2, 10" or highlight=1-5;!2,10 - def resolve_highlight_lines spec - lines = [] - spec.delete(' ').split(DataDelimiterRx).map do |entry| - negate = false - if entry.start_with? '!' - entry = entry[1..-1] - negate = true + # Internal: Convert a quoted text region + # + # match - The MatchData for the quoted text region + # type - The quoting type (single, double, strong, emphasis, monospaced, etc) + # scope - The scope of the quoting (constrained or unconstrained) + # + # Returns The converted String text for the quoted text region + def convert_quoted_text match, type, scope + if match[0].start_with? RS + if scope == :constrained && (attrs = match[2]) + unescaped_attrs = %([#{attrs}]) + else + return match[0].slice 1, match[0].length end - if entry.include? '-' - s, e = entry.split '-', 2 - line_nums = (s.to_i..e.to_i).to_a - if negate - lines -= line_nums - else - lines.concat line_nums - end + end + + if scope == :constrained + if unescaped_attrs + %(#{unescaped_attrs}#{Inline.new(self, :quoted, match[3], type: type).convert}) else - if negate - lines.delete entry.to_i - else - lines << entry.to_i + if (attrlist = match[2]) + id = (attributes = parse_quoted_text_attributes attrlist)['id'] + type = :unquoted if type == :mark end + %(#{match[1]}#{Inline.new(self, :quoted, match[3], type: type, id: id, attributes: attributes).convert}) end + else + if (attrlist = match[1]) + id = (attributes = parse_quoted_text_attributes attrlist)['id'] + type = :unquoted if type == :mark + end + Inline.new(self, :quoted, match[2], type: type, id: id, attributes: attributes).convert end - lines.sort.uniq end - # Public: Apply verbatim substitutions on source (for use when highlighting is disabled). - # - # source - the source code String on which to apply verbatim substitutions - # process_callouts - a Boolean flag indicating whether callout marks should be substituted + # Internal: Substitute replacement text for matched location # - # returns the substituted source - def sub_source source, process_callouts - return process_callouts ? sub_callouts(sub_specialchars(source)) : sub_specialchars(source) + # returns The String text with the replacement characters substituted + def do_replacement m, replacement, restore + if (captured = m[0]).include? RS + # we have to use sub since we aren't sure it's the first char + captured.sub RS, '' + else + case restore + when :none + replacement + when :bounding + m[1] + replacement + m[2] + else # :leading + m[1] + replacement + end + end end - # Internal: Lock-in the substitutions for this block + # Internal: Inserts text into a formatted text enclosure; used by xreftext + alias sub_placeholder sprintf unless RUBY_ENGINE == 'opal' + + # Internal: Parse the attributes that are defined on quoted (aka formatted) text # - # Looks for an attribute named "subs". If present, resolves the - # substitutions and assigns it to the subs property on this block. - # Otherwise, assigns a set of default substitutions based on the - # content model of the block. 
+ # str - A non-nil String of unprocessed attributes; + # space-separated roles (e.g., role1 role2) or the id/role shorthand syntax (e.g., #idname.role) # - # Returns nothing - def lock_in_subs - if @default_subs - default_subs = @default_subs - else - case @content_model - when :simple - default_subs = SUBS[:normal] - when :verbatim - if @context == :listing || (@context == :literal && !(option? 'listparagraph')) - default_subs = SUBS[:verbatim] - elsif @context == :verse - default_subs = SUBS[:normal] - else - default_subs = SUBS[:basic] - end - when :raw - if @context == :stem - default_subs = SUBS[:basic] - else - default_subs = SUBS[:pass] - end + # Returns a Hash of attributes (role and id only) + def parse_quoted_text_attributes str + return {} if (str = str.rstrip).empty? + # NOTE attributes are typically resolved after quoted text, so substitute eagerly + str = sub_attributes str if str.include? ATTR_REF_HEAD + # for compliance, only consider first positional attribute (very unlikely) + str = str.slice 0, (str.index ',') if str.include? ',' + + if (str.start_with? '.', '#') && Compliance.shorthand_property_syntax + segments = str.split '#', 2 + + if segments.size > 1 + id, *more_roles = segments[1].split('.') else - return + more_roles = [] end - end - if (custom_subs = @attributes['subs']) - @subs = resolve_block_subs custom_subs, default_subs, @context + roles = segments[0].empty? ? [] : segments[0].split('.') + if roles.size > 1 + roles.shift + end + + if more_roles.size > 0 + roles.concat more_roles + end + + attrs = {} + attrs['id'] = id if id + attrs['role'] = roles.join ' ' unless roles.empty? + attrs else - @subs = default_subs.dup + { 'role' => str } end + end - # QUESION delegate this logic to a method? - if @context == :listing && @style == 'source' && @attributes['language'] && - @document.basebackend?('html') && SUB_HIGHLIGHT.include?(@document.attributes['source-highlighter']) - @subs = @subs.map {|sub| sub == :specialcharacters ? :highlight : sub } + # Internal: Normalize text to prepare it for parsing. + # + # If normalize_whitespace is true, strip surrounding whitespace and fold newlines. If unescape_closing_square_bracket + # is set, unescape any escaped closing square brackets. + # + # Returns the normalized text String + def normalize_text text, normalize_whitespace = nil, unescape_closing_square_brackets = nil + unless text.empty? + text = text.strip.tr LF, ' ' if normalize_whitespace + text = text.gsub ESC_R_SB, R_SB if unescape_closing_square_brackets && (text.include? R_SB) + end + text + end + + # Internal: Split text formatted as CSV with support + # for double-quoted values (in which commas are ignored) + def split_simple_csv str + if str.empty? + [] + elsif str.include? 
'"' + values = [] + accum = '' + quote_open = nil + str.each_char do |c| + case c + when ',' + if quote_open + accum = accum + c + else + values << accum.strip + accum = '' + end + when '"' + quote_open = !quote_open + else + accum = accum + c + end + end + values << accum.strip + else + str.split(',').map {|it| it.strip } end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/coderay.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/coderay.rb --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/coderay.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/coderay.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,87 @@ +# frozen_string_literal: true +module Asciidoctor +class SyntaxHighlighter::CodeRayAdapter < SyntaxHighlighter::Base + register_for 'coderay' + + def initialize *args + super + @pre_class = 'CodeRay' + @requires_stylesheet = nil + end + + def highlight? + library_available? + end + + def highlight node, source, lang, opts + @requires_stylesheet = true if (css_mode = opts[:css_mode]) == :class + lang = lang ? (::CodeRay::Scanners[lang = lang.to_sym] && lang rescue :text) : :text + highlighted = ::CodeRay::Duo[lang, :html, + css: css_mode, + line_numbers: (line_numbers = opts[:number_lines]), + line_number_start: opts[:start_line_number], + line_number_anchors: false, + highlight_lines: opts[:highlight_lines], + bold_every: false, + ].highlight source + if line_numbers == :table && opts[:callouts] + [highlighted, (idx = highlighted.index CodeCellStartTagCs) ? idx + CodeCellStartTagCs.length : nil] + else + highlighted + end + end + + def docinfo? location + @requires_stylesheet && location == :footer + end + + def docinfo location, doc, opts + if opts[:linkcss] + %() + else + %() + end + end + + def write_stylesheet? doc + @requires_stylesheet + end + + def write_stylesheet doc, to_dir + ::File.write (::File.join to_dir, stylesheet_basename), read_stylesheet, mode: FILE_WRITE_MODE + end + + module Loader + private + + def library_available? + (@@library_status ||= load_library) == :loaded ? true : nil + end + + def load_library + (defined? ::CodeRay::Duo) ? :loaded : (Helpers.require_library 'coderay', true, :warn).nil? ? :unavailable : :loaded + end + end + + module Styles + include Loader + + def read_stylesheet + @@stylesheet_cache ||= (::File.read (::File.join Stylesheets::STYLESHEETS_DIR, stylesheet_basename), mode: FILE_READ_MODE).rstrip + end + + def stylesheet_basename + 'coderay-asciidoctor.css' + end + end + + extend Styles # exports static methods + include Loader, Styles # adds methods to instance + + CodeCellStartTagCs = '
<td class="code"><pre>'
    +
    +  private_constant :CodeCellStartTagCs
    +end
    +end
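A quick way to exercise the CodeRay adapter introduced above is through the Ruby API. The snippet below is only an illustrative sketch, not part of the patch; it assumes the asciidoctor and coderay gems are installed, and the sample document and output file name are arbitrary:

require 'asciidoctor'

sample = <<~'ADOC'
= CodeRay Demo
:source-highlighter: coderay
:coderay-linenums-mode: table

[source,ruby,linenums]
----
puts 'hello, highlighter'
----
ADOC

# CodeRayAdapter#highlight? returns true, so highlighting happens during
# conversion; the coderay-asciidoctor.css rules are contributed through the
# docinfo(:footer, ...) hook shown in the adapter above.
html = Asciidoctor.convert sample, safe: :safe, standalone: true
File.write 'demo.html', html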
    diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/highlightjs.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/highlightjs.rb
    --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/highlightjs.rb	1970-01-01 00:00:00.000000000 +0000
    +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/highlightjs.rb	2019-08-18 16:11:54.000000000 +0000
    @@ -0,0 +1,26 @@
    +# frozen_string_literal: true
    +module Asciidoctor
    +class SyntaxHighlighter::HighlightJsAdapter < SyntaxHighlighter::Base
    +  register_for 'highlightjs', 'highlight.js'
    +
    +  def initialize *args
    +    super
    +    @name = @pre_class = 'highlightjs'
    +  end
    +
    +  def format node, lang, opts
    +    super node, lang, (opts.merge transform: proc {|_, code| code['class'] = %(language-#{lang || 'none'} hljs) } )
    +  end
    +
    +  def docinfo? location
    +    location == :footer
    +  end
    +
    +  def docinfo location, doc, opts
    +    base_url = doc.attr 'highlightjsdir', %(#{opts[:cdn_base_url]}/highlight.js/#{HIGHLIGHT_JS_VERSION})
+    %(<link rel="stylesheet" href="#{base_url}/styles/#{doc.attr 'highlightjs-theme', 'github'}.min.css"#{opts[:self_closing_tag_slash]}>
+<script src="#{base_url}/highlight.min.js"></script>
+#{(doc.attr? 'highlightjs-languages') ? ((doc.attr 'highlightjs-languages').split ',').map {|lang| %[<script src="#{base_url}/languages/#{lang.lstrip}.min.js"></script>\n] }.join : ''}<script>hljs.initHighlighting()</script>)
    +  end
    +end
    +end
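The highlight.js adapter, by contrast, leaves highlighting to the browser: it never overrides highlight?, so only the footer docinfo markup above is injected into the page. A minimal usage sketch follows (again not part of the patch; the theme and language list are arbitrary examples):

require 'asciidoctor'

sample = <<~'ADOC'
= Client-side Highlighting Demo
:source-highlighter: highlight.js
:highlightjs-theme: monokai
:highlightjs-languages: ruby, yaml

[source,yaml]
----
version: '{build}'
skip_tags: true
----
ADOC

# The format method above only tags the block as "language-yaml hljs"; the
# <link> and <script> elements emitted by docinfo(:footer, ...) then load
# highlight.js from the CDN and colorize the block when the page is opened.
File.write 'demo.html', (Asciidoctor.convert sample, safe: :safe, standalone: true)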
    diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/html_pipeline.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/html_pipeline.rb
    --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/html_pipeline.rb	1970-01-01 00:00:00.000000000 +0000
    +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/html_pipeline.rb	2019-08-18 16:11:54.000000000 +0000
    @@ -0,0 +1,10 @@
    +# frozen_string_literal: true
    +module Asciidoctor
    +class SyntaxHighlighter::HtmlPipelineAdapter < SyntaxHighlighter::Base
    +  register_for 'html-pipeline'
    +
    +  def format node, lang, opts
+    %(<pre#{lang ? %[ lang="#{lang}"] : ''}><code>#{node.content}</code></pre>
    ) + end +end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/prettify.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/prettify.rb --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/prettify.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/prettify.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,27 @@ +# frozen_string_literal: true +module Asciidoctor +class SyntaxHighlighter::PrettifyAdapter < SyntaxHighlighter::Base + register_for 'prettify' + + def initialize *args + super + @pre_class = 'prettyprint' + end + + def format node, lang, opts + opts[:transform] = proc {|pre| pre['class'] += %( #{(start = node.attr 'start') ? %[linenums:#{start}] : 'linenums'}) } if node.attr? 'linenums' + super + end + + def docinfo? location + location == :footer + end + + def docinfo location, doc, opts + base_url = doc.attr 'prettifydir', %(#{opts[:cdn_base_url]}/prettify/r298) + prettify_theme_url = ((prettify_theme = doc.attr 'prettify-theme', 'prettify').start_with? 'http://', 'https://') ? prettify_theme : %(#{base_url}/#{prettify_theme}.min.css) + %( +) + end +end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/pygments.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/pygments.rb --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/pygments.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/pygments.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,149 @@ +# frozen_string_literal: true +module Asciidoctor +class SyntaxHighlighter::PygmentsAdapter < SyntaxHighlighter::Base + register_for 'pygments' + + def initialize *args + super + @requires_stylesheet = nil + @style = nil + end + + def highlight? + library_available? + end + + def highlight node, source, lang, opts + lexer = (::Pygments::Lexer.find_by_alias lang) || (::Pygments::Lexer.find_by_mimetype 'text/plain') + @requires_stylesheet = true unless (noclasses = opts[:css_mode] != :class) + highlight_opts = { + classprefix: TOKEN_CLASS_PREFIX, + cssclass: WRAPPER_CLASS, + nobackground: true, + noclasses: noclasses, + startinline: lexer.name == 'PHP' && !(node.option? 'mixed'), + stripnl: false, + style: (@style ||= (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE), + } + if (highlight_lines = opts[:highlight_lines]) + highlight_opts[:hl_lines] = highlight_lines.join ' ' + end + if (linenos = opts[:number_lines]) && (highlight_opts[:linenostart] = opts[:start_line_number]) && (highlight_opts[:linenos] = linenos) == :table + if (highlighted = lexer.highlight source, options: highlight_opts) + highlighted = highlighted.sub StyledLinenoColumnStartTagsRx, LinenoColumnStartTagsCs if noclasses + highlighted = highlighted.sub WrapperTagRx, PreTagCs + opts[:callouts] ? [highlighted, (idx = highlighted.index CodeCellStartTagCs) ? idx + CodeCellStartTagCs.length : nil] : highlighted + else + node.sub_specialchars source # handles nil response from ::Pygments::Lexer#highlight + end + elsif (highlighted = lexer.highlight source, options: highlight_opts) + highlighted = highlighted.gsub StyledLinenoSpanTagRx, LinenoSpanTagCs if linenos && noclasses + highlighted.sub WrapperTagRx, '\1' + else + node.sub_specialchars source # handles nil response from ::Pygments::Lexer#highlight + end + end + + def format node, lang, opts + if opts[:css_mode] != :class && (@style = (style = opts[:style]) && (style_available? 
style) || DEFAULT_STYLE) && + (pre_style_attr_val = base_style @style) + opts[:transform] = proc {|pre| pre['style'] = pre_style_attr_val } + end + super + end + + def docinfo? location + @requires_stylesheet && location == :footer + end + + def docinfo location, doc, opts + if opts[:linkcss] + %() + else + %() + end + end + + def write_stylesheet? doc + @requires_stylesheet + end + + def write_stylesheet doc, to_dir + ::File.write (::File.join to_dir, (stylesheet_basename @style)), (read_stylesheet @style), mode: FILE_WRITE_MODE + end + + module Loader + private + + def library_available? + (@@library_status ||= load_library) == :loaded ? true : nil + end + + def load_library + (defined? ::Pygments::Lexer) ? :loaded : (Helpers.require_library 'pygments', 'pygments.rb', :warn).nil? ? :unavailable : :loaded + end + end + + module Styles + include Loader + + def read_stylesheet style + library_available? ? @@stylesheet_cache[style || DEFAULT_STYLE] || '/* Failed to load Pygments CSS. */' : '/* Pygments CSS disabled because Pygments is not available. */' + end + + def stylesheet_basename style + %(pygments-#{style || DEFAULT_STYLE}.css) + end + + private + + def base_style style + library_available? ? @@base_style_cache[style || DEFAULT_STYLE] : nil + end + + def style_available? style + (((@@available_styles ||= ::Pygments.styles.to_set).include? style) rescue nil) && style + end + + @@base_style_cache = ::Hash.new do |cache, key| + if BaseStyleRx =~ @@stylesheet_cache[key] + @@base_style_cache = cache.merge key => (style = $1.strip) + style + end + end + @@stylesheet_cache = ::Hash.new do |cache, key| + if (stylesheet = ::Pygments.css BASE_SELECTOR, classprefix: TOKEN_CLASS_PREFIX, style: key) + @@stylesheet_cache = cache.merge key => stylesheet + stylesheet + end + end + + DEFAULT_STYLE = 'default' + BASE_SELECTOR = 'pre.pygments' + TOKEN_CLASS_PREFIX = 'tok-' + + BaseStyleRx = /^#{BASE_SELECTOR.gsub '.', '\\.'} +\{([^}]+?)\}/ + + private_constant :BASE_SELECTOR, :TOKEN_CLASS_PREFIX, :BaseStyleRx + end + + extend Styles # exports static methods + include Loader, Styles # adds methods to instance + + CodeCellStartTagCs = '' + LinenoColumnStartTagsCs = '
    '
    +  LinenoSpanTagCs = '\1'
    +  PreTagCs = '
    \1
    ' + StyledLinenoColumnStartTagsRx = /
    /
    +  StyledLinenoSpanTagRx = %r(( *\d+ ))
    +  WRAPPER_CLASS = 'lineno' # doesn't appear in output; Pygments appends "table" to this value to make nested table class
    +  # NOTE 
     has style attribute when pygments-css=style
    +  # NOTE 
    has trailing newline when pygments-linenums-mode=table + # NOTE initial preserves leading blank lines + WrapperTagRx = %r(
    ]*?>(.*)
    \n*)m + + private_constant :CodeCellStartTagCs, :LinenoColumnStartTagsCs, :LinenoSpanTagCs, :PreTagCs, :StyledLinenoColumnStartTagsRx, :StyledLinenoSpanTagRx, :WrapperTagRx, :WRAPPER_CLASS +end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/rouge.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/rouge.rb --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter/rouge.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter/rouge.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,128 @@ +# frozen_string_literal: true +module Asciidoctor +class SyntaxHighlighter::RougeAdapter < SyntaxHighlighter::Base + register_for 'rouge' + + def initialize *args + super + @requires_stylesheet = @style = nil + end + + def highlight? + library_available? + end + + def highlight node, source, lang, opts + lexer = (::Rouge::Lexer.find_fancy lang) || ::Rouge::Lexers::PlainText + lexer_opts = lexer.tag == 'php' && !(node.option? 'mixed') ? { start_inline: true } : {} + @style ||= (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE + if opts[:css_mode] == :class + @requires_stylesheet = true + formatter = ::Rouge::Formatters::HTML.new inline_theme: @style + else + formatter = ::Rouge::Formatters::HTMLInline.new (::Rouge::Theme.find @style).new + end + if (highlight_lines = opts[:highlight_lines]) + formatter = RougeExt::Formatters::HTMLLineHighlighter.new formatter, lines: highlight_lines + end + if opts[:number_lines] + formatter = RougeExt::Formatters::HTMLTable.new formatter, start_line: opts[:start_line_number] + if opts[:callouts] + return [(highlighted = formatter.format lexer.lex source, lexer_opts), (idx = highlighted.index CodeCellStartTagCs) ? idx + CodeCellStartTagCs.length : nil] + end + end + formatter.format lexer.lex source, lexer_opts + end + + def format node, lang, opts + if (query_idx = lang && (lang.index '?')) + lang = lang.slice 0, query_idx + end + if opts[:css_mode] != :class && (@style = (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE) && + (pre_style_attr_val = base_style @style) + opts[:transform] = proc {|pre| pre['style'] = pre_style_attr_val } + end + super + end + + def docinfo? location + @requires_stylesheet && location == :footer + end + + def docinfo location, doc, opts + if opts[:linkcss] + %() + else + %() + end + end + + def write_stylesheet? doc + @requires_stylesheet + end + + def write_stylesheet doc, to_dir + ::File.write (::File.join to_dir, (stylesheet_basename @style)), (read_stylesheet @style), mode: FILE_WRITE_MODE + end + + module Loader + private + + def library_available? + (@@library_status ||= load_library) == :loaded ? true : nil + end + + def load_library + (defined? RougeExt) ? :loaded : (Helpers.require_library %(#{::File.dirname __dir__}/rouge_ext), 'rouge', :warn).nil? ? :unavailable : :loaded + end + end + + module Styles + include Loader + + def read_stylesheet style + library_available? ? @@stylesheet_cache[style || DEFAULT_STYLE] : '/* Rouge CSS disabled because Rouge is not available. */' + end + + def stylesheet_basename style + %(rouge-#{style || DEFAULT_STYLE}.css) + end + + private + + def base_style style + library_available? ? @@base_style_cache[style || DEFAULT_STYLE] : nil + end + + def style_available? 
style + (::Rouge::Theme.find style) && style + end + + @@base_style_cache = ::Hash.new do |cache, key| + base_style = (theme = ::Rouge::Theme.find key).base_style + (val = base_style[:fg]) && ((style ||= []) << %(color: #{theme.palette val})) + (val = base_style[:bg]) && ((style ||= []) << %(background-color: #{theme.palette val})) + @@base_style_cache = cache.merge key => (resolved_base_style = style && (style.join ';')) + resolved_base_style + end + @@stylesheet_cache = ::Hash.new do |cache, key| + @@stylesheet_cache = cache.merge key => (stylesheet = ((::Rouge::Theme.find key).render scope: BASE_SELECTOR)) + stylesheet + end + + DEFAULT_STYLE = 'github' + BASE_SELECTOR = 'pre.rouge' + + private_constant :BASE_SELECTOR + end + + extend Styles # exports static methods + include Loader, Styles # adds methods to instance + + CodeCellStartTagCs = '' + + private_constant :CodeCellStartTagCs +end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter.rb asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter.rb --- asciidoctor-1.5.5/lib/asciidoctor/syntax_highlighter.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/syntax_highlighter.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,253 @@ +# frozen_string_literal: true +module Asciidoctor +# Public: A pluggable adapter for integrating a syntax (aka code) highlighter into AsciiDoc processing. +# +# There are two types of syntax highlighter adapters. The first performs syntax highlighting during the convert phase. +# This adapter type must define a highlight? method that returns true. The companion highlight method will then be +# called to handle the :specialcharacters substitution for source blocks. The second assumes syntax highlighting is +# performed on the client (e.g., when the HTML document is loaded). This adapter type must define a docinfo? method +# that returns true. The companion docinfo method will then be called to insert markup into the output document. The +# docinfo functionality is available to both adapter types. +# +# Asciidoctor provides several built-in adapters, including coderay, pygments, rouge, highlight.js, html-pipeline, and +# prettify. Additional adapters can be registered using SyntaxHighlighter.register or by supplying a custom factory. +module SyntaxHighlighter + # Public: Returns the String name of this syntax highlighter for referencing it in messages and option names. + attr_reader :name + + def initialize name, backend = 'html5', opts = {} + @name = @pre_class = name + end + + # Public: Indicates whether this syntax highlighter has docinfo (i.e., markup) to insert into the output document at + # the specified location. + # + # location - The Symbol representing the location slot (:head or :footer). + # + # Returns a [Boolean] indicating whether the docinfo method should be called for this location. + def docinfo? location; end + + # Public: Generates docinfo markup for this syntax highlighter to insert at the specified location in the output document. + # + # location - The Symbol representing the location slot (:head or :footer). + # doc - The Document in which this syntax highlighter is being used. + # opts - A Hash of options that configure the syntax highlighting: + # :linkcss - A Boolean indicating whether the stylesheet should be linked instead of embedded (optional). + # :cdn_base_url - The String base URL for assets loaded from the CDN. + # :self_closing_tag_slash - The String '/' if the converter calling this method emits self-closing tags. 
+ # + # Return the [String] markup to insert. + def docinfo location, doc, opts + raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method since #docinfo? returns true) + end + + # Public: Indicates whether highlighting is handled by this syntax highlighter or by the client. + # + # Returns a [Boolean] indicating whether the highlight method should be used to handle the :specialchars substitution. + def highlight?; end + + # Public: Highlights the specified source when this source block is being converted. + # + # If the source contains callout marks, the caller assumes the source remains on the same lines and no closing tags + # are added to the end of each line. If the source gets shifted by one or more lines, this method must return a + # tuple containing the highlighted source and the number of lines by which the source was shifted. + # + # node - The source Block to syntax highlight. + # source - The raw source text String of this source block (after preprocessing). + # lang - The source language String specified on this block (e.g., ruby). + # opts - A Hash of options that configure the syntax highlighting: + # :callouts - A Hash of callouts extracted from the source, indexed by line number (1-based) (optional). + # :css_mode - The Symbol CSS mode (:class or :inline). + # :highlight_lines - A 1-based Array of Integer line numbers to highlight (aka emphasize) (optional). + # :number_lines - A Symbol indicating whether lines should be numbered (:table or :inline) (optional). + # :start_line_number - The starting Integer (1-based) line number (optional, default: 1). + # :style - The String style (aka theme) to use for colorizing the code (optional). + # + # Returns the highlighted source String or a tuple of the highlighted source String and an Integer line offset. + def highlight node, source, lang, opts + raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method since #highlight? returns true) + end + + # Public: Format the highlighted source for inclusion in an HTML document. + # + # node - The source Block being processed. + # lang - The source language String for this Block (e.g., ruby). + # opts - A Hash of options that control syntax highlighting: + # :nowrap - A Boolean that indicates whether wrapping should be disabled (optional). + # + # Returns the highlighted source [String] wrapped in preformatted tags (e.g., pre and code) + def format node, lang, opts + raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method) + end + + # Public: Indicates whether this syntax highlighter wants to write a stylesheet to disk. Only called if both the + # linkcss and copycss attributes are set on the document. + # + # doc - The Document in which this syntax highlighter is being used. + # + # Returns a [Boolean] indicating whether the write_stylesheet method should be called. + def write_stylesheet? doc; end + + # Public: Writes the stylesheet to support the highlighted source(s) to disk. + # + # doc - The Document in which this syntax highlighter is being used. + # to_dir - The absolute String path of the stylesheet output directory. + # + # Returns nothing. + def write_stylesheet doc, to_dir + raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method since #write_stylesheet? 
returns true) + end + + private_class_method def self.included into + into.extend Config + end || :included + + module Config + # Public: Statically register the current class in the registry for the specified names. + # + # names - one or more String or Symbol names with which to register the current class as a syntax highlighter + # implementation. Symbol arguments are coerced to Strings. + # + # Returns nothing. + def register_for *names + SyntaxHighlighter.register self, *(names.map {|name| name.to_s }) + end + end + + module Factory + # Public: Associates the syntax highlighter class or object with the specified names. + # + # syntax_highlighter - the syntax highlighter implementation to register + # names - one or more String names with which to register this syntax highlighter implementation. + # + # Returns nothing. + def register syntax_highlighter, *names + names.each {|name| registry[name] = syntax_highlighter } + end + + # Public: Retrieves the syntax highlighter class or object registered for the specified name. + # + # name - The String name of the syntax highlighter to retrieve. + # + # Returns the SyntaxHighlighter Class or Object instance registered for this name. + def for name + registry[name] + end + + # Public: Resolves the name to a syntax highlighter instance, if found in the registry. + # + # name - The String name of the syntax highlighter to create. + # backend - The String name of the backend for which this syntax highlighter is being used (default: 'html5'). + # opts - A Hash of options providing information about the context in which this syntax highlighter is used: + # :doc - The Document for which this syntax highlighter was created. + # + # Returns a [SyntaxHighlighter] instance for the specified name. + def create name, backend = 'html5', opts = {} + if (syntax_hl = self.for name) + syntax_hl = syntax_hl.new name, backend, opts if ::Class === syntax_hl + raise ::NameError, %(#{syntax_hl.class} must specify a value for `name') unless syntax_hl.name + syntax_hl + end + end + + private + + def registry + raise ::NotImplementedError, %(#{Factory} subclass #{self.class} must implement the ##{__method__} method) + end + end + + class CustomFactory + include Factory + + def initialize seed_registry = nil + @registry = seed_registry || {} + end + + private + + def registry + @registry + end + end + + module DefaultFactory + include Factory + + private + + @@registry = {} + + def registry + @@registry + end + + unless RUBY_ENGINE == 'opal' + public + + def register syntax_highlighter, *names + @@mutex.owned? ? names.each {|name| @@registry = @@registry.merge name => syntax_highlighter } : + @@mutex.synchronize { register syntax_highlighter, *names } + end + + # This method will lazy require and register additional built-in implementations, which include coderay, + # pygments, rouge, and prettify. Refer to {Factory#for} for parameters and return value. 
+ def for name + @@registry.fetch name do + @@mutex.synchronize do + @@registry.fetch name do + if (require_path = PROVIDED[name]) + require require_path + @@registry[name] + else + @@registry = @@registry.merge name => nil + nil + end + end + end + end + end + + PROVIDED = { + 'coderay' => %(#{__dir__}/syntax_highlighter/coderay), + 'prettify' => %(#{__dir__}/syntax_highlighter/prettify), + 'pygments' => %(#{__dir__}/syntax_highlighter/pygments), + 'rouge' => %(#{__dir__}/syntax_highlighter/rouge), + } + + private + + @@mutex = ::Mutex.new + end + end + + class DefaultFactoryProxy < CustomFactory + include DefaultFactory # inserts module into ancestors immediately after superclass + + def for name + @registry.fetch(name) { super } + end unless RUBY_ENGINE == 'opal' + end + + class Base + include SyntaxHighlighter + + def format node, lang, opts + class_attr_val = opts[:nowrap] ? %(#{@pre_class} highlight nowrap) : %(#{@pre_class} highlight) + if (transform = opts[:transform]) + pre = { 'class' => class_attr_val } + code = lang ? { 'data-lang' => lang } : {} + transform[pre, code] + %(#{node.content}
    ) + else + %(
    #{node.content}
    ) + end + end + end + + extend DefaultFactory # exports static methods +end +end + +require_relative 'syntax_highlighter/highlightjs' +require_relative 'syntax_highlighter/html_pipeline' unless RUBY_ENGINE == 'opal' diff -Nru asciidoctor-1.5.5/lib/asciidoctor/table.rb asciidoctor-2.0.10/lib/asciidoctor/table.rb --- asciidoctor-1.5.5/lib/asciidoctor/table.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/table.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,8 +1,10 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor # Public: Methods and constants for managing AsciiDoc table content in a document. # It supports all three of AsciiDoc's table formats: psv, dsv and csv. class Table < AbstractBlock + # precision of column widths + DEFAULT_PRECISION = 4 # Public: A data object that encapsulates the collection of rows (head, foot, body) for a table class Rows @@ -14,49 +16,29 @@ @body = body end - alias :[] :send - end - - # Public: A String key that specifies the default table format in AsciiDoc (psv) - DEFAULT_DATA_FORMAT = 'psv' - - # Public: An Array of String keys that represent the table formats in AsciiDoc - DATA_FORMATS = ['psv', 'dsv', 'csv'] - - # Public: A Hash mapping the AsciiDoc table formats to their default delimiters - DEFAULT_DELIMITERS = { - 'psv' => '|', - 'dsv' => ':', - 'csv' => ',' - } - - # Public: A Hash mapping styles abbreviations to styles that can be applied - # to a table column or cell - TEXT_STYLES = { - 'd' => :none, - 's' => :strong, - 'e' => :emphasis, - 'm' => :monospaced, - 'h' => :header, - 'l' => :literal, - 'v' => :verse, - 'a' => :asciidoc - } + alias [] send - # Public: A Hash mapping alignment abbreviations to alignments (horizontal - # and vertial) that can be applies to a table column or cell - ALIGNMENTS = { - :h => { - '<' => 'left', - '>' => 'right', - '^' => 'center' - }, - :v => { - '<' => 'top', - '>' => 'bottom', - '^' => 'middle' - } - } + # Public: Retrieve the rows grouped by section as a nested Array. + # + # Creates a 2-dimensional array of two element entries. The first element + # is the section name as a symbol. The second element is the Array of rows + # in that section. The entries are in document order (head, foot, body). + # + # Returns a 2-dimentional Array of rows grouped by section. + def by_section + [[:head, @head], [:body, @body], [:foot, @foot]] + end + + # Public: Retrieve the rows as a Hash. + # + # The keys are the names of the section groups and the values are the Array of rows in that section. + # The keys are in document order (head, foot, body). + # + # Returns a Hash of rows grouped by section. + def to_h + { head: @head, body: @body, foot: @foot } + end + end # Public: Get/Set the columns for this table attr_accessor :columns @@ -68,14 +50,17 @@ # Public: Boolean specifies whether this table has a header row attr_accessor :has_header_option + # Public: Get the caption for this table + attr_reader :caption + def initialize parent, attributes super parent, :table @rows = Rows.new @columns = [] - @has_header_option = attributes.key? 'header-option' + @has_header_option = attributes['header-option'] ? true : false - # smell like we need a utility method here + # smells like we need a utility method here # to resolve an integer width from potential bogus input if (pcwidth = attributes['width']) if (pcwidth_intval = pcwidth.to_i) > 100 || pcwidth_intval < 1 @@ -86,12 +71,11 @@ end @attributes['tablepcwidth'] = pcwidth_intval - if @document.attributes.key? 
'pagewidth' - @attributes['tableabswidth'] ||= - ((@attributes['tablepcwidth'].to_f / 100) * @document.attributes['pagewidth']).round + if @document.attributes['pagewidth'] + @attributes['tableabswidth'] = (abswidth_val = (((pcwidth_intval / 100.0) * @document.attributes['pagewidth'].to_f).truncate DEFAULT_PRECISION)) == abswidth_val.to_i ? abswidth_val.to_i : abswidth_val end - attributes['orientation'] = 'landscape' if attributes.key? 'rotate-option' + @attributes['orientation'] = 'landscape' if attributes['rotate-option'] end # Internal: Returns whether the current row being processed is @@ -105,14 +89,21 @@ # returns nothing def create_columns colspecs cols = [] + autowidth_cols = nil width_base = 0 colspecs.each do |colspec| - width_base += colspec['width'] + colwidth = colspec['width'] cols << (Column.new self, cols.size, colspec) + if colwidth < 0 + (autowidth_cols ||= []) << cols[-1] + else + width_base += colwidth + end end - unless (@columns = cols).empty? - @attributes['colcount'] = cols.size - assign_column_widths(width_base == 0 ? nil : width_base) + if (num_cols = (@columns = cols).size) > 0 + @attributes['colcount'] = num_cols + width_base = nil unless width_base > 0 || autowidth_cols + assign_column_widths width_base, autowidth_cols end nil end @@ -127,20 +118,32 @@ # width_base - the total of the relative column values used for calculating percentage widths (default: nil) # # returns nothing - def assign_column_widths width_base = nil - pf = 10.0 ** 4 # precision factor (multipler / divisor) for managing precision of calculated result + def assign_column_widths width_base = nil, autowidth_cols = nil + precision = DEFAULT_PRECISION total_width = col_pcwidth = 0 if width_base - @columns.each {|col| total_width += (col_pcwidth = col.assign_width nil, width_base, pf) } + if autowidth_cols + if width_base > 100 + autowidth = 0 + logger.warn %(total column width must not exceed 100% when using autowidth columns; got #{width_base}%) + else + autowidth = ((100.0 - width_base) / autowidth_cols.size).truncate precision + autowidth = autowidth.to_i if autowidth.to_i == autowidth + width_base = 100 + end + autowidth_attrs = { 'width' => autowidth, 'autowidth-option' => '' } + autowidth_cols.each {|col| col.update_attributes autowidth_attrs } + end + @columns.each {|col| total_width += (col_pcwidth = col.assign_width nil, width_base, precision) } else - col_pcwidth = ((100 * pf / @columns.size).to_i) / pf + col_pcwidth = (100.0 / @columns.size).truncate precision col_pcwidth = col_pcwidth.to_i if col_pcwidth.to_i == col_pcwidth - @columns.each {|col| total_width += col.assign_width col_pcwidth } + @columns.each {|col| total_width += col.assign_width col_pcwidth, nil, precision } end - # donate balance, if any, to final column - @columns[-1].assign_width(((100 - total_width + col_pcwidth) * pf).round / pf) unless total_width == 100 + # donate balance, if any, to final column (using half up rounding) + @columns[-1].assign_width(((100 - total_width + col_pcwidth).round precision), nil, precision) unless total_width == 100 nil end @@ -149,7 +152,7 @@ # by the options on the table # # returns nothing - def partition_header_footer(attributes) + def partition_header_footer(attrs) # set rowcount before splitting up body rows @attributes['rowcount'] = @rows.body.size @@ -164,7 +167,7 @@ @rows.head = [head] end - if num_body_rows > 0 && attributes.key?('footer-option') + if num_body_rows > 0 && attrs['footer-option'] @rows.foot = [@rows.body.pop] end @@ -175,11 +178,11 @@ # Public: Methods to 
manage the columns of an AsciiDoc table. In particular, it # keeps track of the column specs class Table::Column < AbstractNode - # Public: Get/Set the Symbol style for this column. + # Public: Get/Set the style Symbol for this column. attr_accessor :style def initialize table, index, attributes = {} - super table, :column + super table, :table_column @style = attributes['style'] attributes['colnumber'] = index + 1 attributes['width'] ||= 1 @@ -189,31 +192,36 @@ end # Public: An alias to the parent block (which is always a Table) - alias :table :parent + alias table parent # Internal: Calculate and assign the widths (percentage and absolute) for this column # # This method assigns the colpcwidth and colabswidth attributes. # # returns the resolved colpcwidth value - def assign_width col_pcwidth, width_base = nil, pf = 10000.0 + def assign_width col_pcwidth, width_base, precision if width_base - col_pcwidth = ((@attributes['width'].to_f / width_base) * 100 * pf).to_i / pf + col_pcwidth = (@attributes['width'].to_f * 100.0 / width_base).truncate precision col_pcwidth = col_pcwidth.to_i if col_pcwidth.to_i == col_pcwidth end - @attributes['colpcwidth'] = col_pcwidth - if parent.attributes.key? 'tableabswidth' - # FIXME calculate more accurately (only used in DocBook output) - @attributes['colabswidth'] = ((col_pcwidth / 100.0) * parent.attributes['tableabswidth']).round + if parent.attributes['tableabswidth'] + @attributes['colabswidth'] = (col_abswidth = ((col_pcwidth / 100.0) * parent.attributes['tableabswidth']).truncate precision) == col_abswidth.to_i ? col_abswidth.to_i : col_abswidth end - col_pcwidth + @attributes['colpcwidth'] = col_pcwidth + end + + def block? + false + end + + def inline? + false end end # Public: Methods for managing the a cell in an AsciiDoc table. -class Table::Cell < AbstractNode - # Public: Get/Set the Symbol style for this cell (default: nil) - attr_accessor :style +class Table::Cell < AbstractBlock + DOUBLE_LF = LF * 2 # Public: An Integer of the number of columns this cell will span (default: nil) attr_accessor :colspan @@ -222,68 +230,152 @@ attr_accessor :rowspan # Public: An alias to the parent block (which is always a Column) - alias :column :parent + alias column parent - # Public: The internal Asciidoctor::Document for a cell that has the asciidoc style + # Internal: Returns the nested Document in an AsciiDoc table cell (only set when style is :asciidoc) attr_reader :inner_document - def initialize column, text, attributes = {}, cursor = nil - super column, :cell - @text = text - @style = nil - @colspan = nil - @rowspan = nil - # TODO feels hacky + def initialize column, cell_text, attributes = {}, opts = {} + super column, :table_cell + @source_location = opts[:cursor].dup if @document.sourcemap if column - @style = column.attributes['style'] - update_attributes(column.attributes) + cell_style = column.attributes['style'] unless (in_header_row = column.table.header_row?) + # REVIEW feels hacky to inherit all attributes from column + update_attributes column.attributes end + # NOTE if attributes is defined, we know this is a psv cell; implies text needs to be stripped if attributes - @colspan = attributes.delete('colspan') - @rowspan = attributes.delete('rowspan') - # TODO eventualy remove the style attribute from the attributes hash - #@style = attributes.delete('style') if attributes.key? 'style' - @style = attributes['style'] if attributes.key? 'style' - update_attributes(attributes) + if attributes.empty? 
+ @colspan = @rowspan = nil + else + @colspan, @rowspan = (attributes.delete 'colspan'), (attributes.delete 'rowspan') + # TODO delete style attribute from @attributes if set + cell_style = attributes['style'] || cell_style unless in_header_row + update_attributes attributes + end + if cell_style == :asciidoc + asciidoc = true + inner_document_cursor = opts[:cursor] + if (cell_text = cell_text.rstrip).start_with? LF + lines_advanced = 1 + lines_advanced += 1 while (cell_text = cell_text.slice 1, cell_text.length).start_with? LF + # NOTE this only works if we remain in the same file + inner_document_cursor.advance lines_advanced + else + cell_text = cell_text.lstrip + end + elsif cell_style == :literal + literal = true + cell_text = cell_text.rstrip + # QUESTION should we use same logic as :asciidoc cell? strip leading space if text doesn't start with newline? + cell_text = cell_text.slice 1, cell_text.length while cell_text.start_with? LF + else + normal_psv = true + # NOTE AsciidoctorJ uses nil cell_text to create an empty cell + cell_text = cell_text ? cell_text.strip : '' + end + else + @colspan = @rowspan = nil + if cell_style == :asciidoc + asciidoc = true + inner_document_cursor = opts[:cursor] + end end - # only allow AsciiDoc cells in non-header rows - if @style == :asciidoc && !column.table.header_row? + # NOTE only true for non-header rows + if asciidoc # FIXME hide doctitle from nested document; temporary workaround to fix # nested document seeing doctitle and assuming it has its own document title parent_doctitle = @document.attributes.delete('doctitle') # NOTE we need to process the first line of content as it may not have been processed # the included content cannot expect to match conditional terminators in the remaining # lines of table cell content, it must be self-contained logic - inner_document_lines = @text.split(EOL) - unless inner_document_lines.empty? || !inner_document_lines[0].include?('::') - unprocessed_lines = inner_document_lines[0] - processed_lines = PreprocessorReader.new(@document, unprocessed_lines).readlines - if processed_lines != unprocessed_lines + # QUESTION should we reset cell_text to nil? + # QUESTION is is faster to check for :: before splitting? + inner_document_lines = cell_text.split LF, -1 + if (unprocessed_line1 = inner_document_lines[0]).include? '::' + preprocessed_lines = (PreprocessorReader.new @document, [unprocessed_line1]).readlines + unless unprocessed_line1 == preprocessed_lines[0] && preprocessed_lines.size < 2 inner_document_lines.shift - inner_document_lines.unshift(*processed_lines) + inner_document_lines.unshift(*preprocessed_lines) unless preprocessed_lines.empty? end - end - @inner_document = Document.new(inner_document_lines, :header_footer => false, :parent => @document, :cursor => cursor) + end unless inner_document_lines.empty? + @inner_document = Document.new inner_document_lines, standalone: false, parent: @document, cursor: inner_document_cursor @document.attributes['doctitle'] = parent_doctitle unless parent_doctitle.nil? + @subs = nil + elsif literal + @content_model = :verbatim + @subs = BASIC_SUBS + else + if normal_psv && (cell_text.start_with? '[[') && LeadingInlineAnchorRx =~ cell_text + Parser.catalog_inline_anchor $1, $2, self, opts[:cursor], @document + end + @content_model = :simple + @subs = NORMAL_SUBS end + @text = cell_text + @style = cell_style end - # Public: Get the text with normal substitutions applied for this cell. 
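# A minimal usage sketch of the cell styles handled above (normal substitutions vs a
# nested AsciiDoc document); the sample table source is illustrative, not from this patch.
require 'asciidoctor'

sample = <<~'TABLE'
  [cols="1,1a"]
  |===
  |plain *text* cell
  |AsciiDoc cell

  * nested list
  |===
TABLE

body_cells = (Asciidoctor.load sample, safe: :safe).blocks[0].rows.body[0]
body_cells[0].text    # normal substitutions applied to the plain cell text
body_cells[1].content # converted output of the AsciiDoc-style cell's inner document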
Used for cells in the head rows + # Public: Get the String text of this cell with substitutions applied. + # + # Used for cells in the head row as well as text-only (non-AsciiDoc) cells in + # the foot row and body. + # + # This method shouldn't be used for cells that have the AsciiDoc style. + # + # Returns the converted String text for this Cell def text - apply_normal_subs(@text).strip + apply_subs @text, @subs + end + + # Public: Set the String text. + # + # This method shouldn't be used for cells that have the AsciiDoc style. + # + # Returns the new String text assigned to this Cell + def text= val + @text = val end # Public: Handles the body data (tbody, tfoot), applying styles and partitioning into paragraphs + # + # This method should not be used for cells in the head row or that have the literal or verse style. + # + # Returns the converted String for this Cell def content - if @style == :asciidoc + if (cell_style = @style) == :asciidoc @inner_document.convert - else - text.split(BlankLineRx).map do |p| - !@style || @style == :header ? p : Inline.new(parent, :quoted, p, :type => @style).convert + elsif @text.include? DOUBLE_LF + (text.split BlankLineRx).map do |para| + cell_style && cell_style != :header ? (Inline.new parent, :quoted, para, type: cell_style).convert : para end + elsif (subbed_text = text).empty? + [] + elsif cell_style && cell_style != :header + [(Inline.new parent, :quoted, subbed_text, type: cell_style).convert] + else + [subbed_text] end end + def lines + @text.split LF + end + + def source + @text + end + + # Public: Get the source file where this block started + def file + @source_location && @source_location.file + end + + # Public: Get the source line number where this block started + def lineno + @source_location && @source_location.lineno + end + def to_s "#{super.to_s} - [text: #@text, colspan: #{@colspan || 1}, rowspan: #{@rowspan || 1}, attributes: #@attributes]" end @@ -296,11 +388,26 @@ # instantiated, the row is closed if the cell satisifies the column count and, # finally, a new buffer is allocated to track the next cell. class Table::ParserContext + include Logging + + # Public: An Array of String keys that represent the table formats in AsciiDoc + #-- + # QUESTION should we recognize !sv as a valid format value? + FORMATS = ['psv', 'csv', 'dsv', 'tsv'].to_set + + # Public: A Hash mapping the AsciiDoc table formats to default delimiters + DELIMITERS = { + 'psv' => ['|', /\|/], + 'csv' => [',', /,/], + 'dsv' => [':', /:/], + 'tsv' => [?\t, /\t/], + '!sv' => ['!', /!/], + } # Public: The Table currently being parsed attr_accessor :table - # Public: The AsciiDoc table format (psv, dsv or csv) + # Public: The AsciiDoc table format (psv, dsv, or csv) attr_accessor :format # Public: Get the expected column count for a row @@ -319,25 +426,39 @@ # Public: The cell delimiter compiled Regexp for this table. attr_reader :delimiter_re - def initialize(reader, table, attributes = {}) - @reader = reader + def initialize reader, table, attributes = {} + @start_cursor_data = (@reader = reader).mark @table = table - # TODO if reader.cursor becomes a reference, this would require .dup - @last_cursor = reader.cursor - if (@format = attributes['format']) - unless Table::DATA_FORMATS.include? @format - raise %(Illegal table format: #{@format}) + + if attributes.key? 
'format' + if FORMATS.include?(xsv = attributes['format']) + if xsv == 'tsv' + # NOTE tsv is just an alias for csv with a tab separator + @format = 'csv' + elsif (@format = xsv) == 'psv' && table.document.nested? + xsv = '!sv' + end + else + logger.error message_with_context %(illegal table format: #{xsv}), source_location: reader.cursor_at_prev_line + @format, xsv = 'psv', (table.document.nested? ? '!sv' : 'psv') end else - @format = Table::DEFAULT_DATA_FORMAT + @format, xsv = 'psv', (table.document.nested? ? '!sv' : 'psv') end - @delimiter = if @format == 'psv' && !(attributes.key? 'separator') && table.document.nested? - '!' + if attributes.key? 'separator' + if (sep = attributes['separator']).nil_or_empty? + @delimiter, @delimiter_rx = DELIMITERS[xsv] + # QUESTION should we support any other escape codes or multiple tabs? + elsif sep == '\t' + @delimiter, @delimiter_rx = DELIMITERS['tsv'] + else + @delimiter, @delimiter_rx = sep, /#{::Regexp.escape sep}/ + end else - attributes['separator'] || Table::DEFAULT_DELIMITERS[@format] + @delimiter, @delimiter_rx = DELIMITERS[xsv] end - @delimiter_re = /#{Regexp.escape @delimiter}/ + @colcount = table.columns.empty? ? -1 : table.columns.size @buffer = '' @cellspecs = [] @@ -361,34 +482,41 @@ # # returns Regexp MatchData if the line contains the delimiter, false otherwise def match_delimiter(line) - @delimiter_re.match(line) + @delimiter_rx.match(line) + end + + # Public: Skip past the matched delimiter because it's inside quoted text. + # + # Returns nothing + def skip_past_delimiter(pre) + @buffer = %(#{@buffer}#{pre}#{@delimiter}) + nil end - # Public: Skip beyond the matched delimiter because it was a false positive - # (either because it was escaped or in a quoted context) + # Public: Skip past the matched delimiter because it's escaped. # - # returns the String after the match - def skip_matched_delimiter(match, escaped = false) - @buffer = %(#{@buffer}#{escaped ? match.pre_match.chop : match.pre_match}#{@delimiter}) - match.post_match + # Returns nothing + def skip_past_escaped_delimiter(pre) + @buffer = %(#{@buffer}#{pre.chop}#{@delimiter}) + nil end # Public: Determines whether the buffer has unclosed quotes. Used for CSV data. # # returns true if the buffer has unclosed quotes, false if it doesn't or it # isn't quoted data - def buffer_has_unclosed_quotes?(append = nil) - record = %(#{@buffer}#{append}).strip - record.start_with?('"') && !record.start_with?('""') && !record.end_with?('"') - end - - # Public: Determines whether the buffer contains quoted data. Used for CSV data. - # - # returns true if the buffer starts with a double quote (and not an escaped double quote), - # false otherwise - def buffer_quoted? - @buffer = @buffer.lstrip - @buffer.start_with?('"') && !@buffer.start_with?('""') + def buffer_has_unclosed_quotes? append = nil + if (record = append ? (@buffer + append).strip : @buffer.strip) == '"' + true + elsif record.start_with? '"' + if ((trailing_quote = record.end_with? '"') && (record.end_with? '""')) || (record.start_with? '""') + ((record = record.gsub '""', '').start_with? '"') && !(record.end_with? '"') + else + !trailing_quote + end + else + false + end end # Public: Takes a cell spec from the stack. 
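# A small sketch of how the format and separator attributes handled above select the
# cell delimiter in practice; the sample tables are illustrative only.
require 'asciidoctor'

csv_table = <<~'ADOC'
  [format=csv]
  |===
  name,says
  cow,"moo, moo"
  |===
ADOC

custom_sep_table = <<~'ADOC'
  [separator=;]
  |===
  ;one;two
  |===
ADOC

Asciidoctor.convert csv_table, safe: :safe        # cells split on ',', quoted comma preserved
Asciidoctor.convert custom_sep_table, safe: :safe # literal separator compiled to /;/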
Cell specs precede the delimiter, so a @@ -460,30 +588,35 @@ # # returns nothing def close_cell(eol = false) - cell_text = @buffer.strip - @buffer = '' if @format == 'psv' - cellspec = take_cellspec - if cellspec + cell_text = @buffer + @buffer = '' + if (cellspec = take_cellspec) repeat = cellspec.delete('repeatcol') || 1 else - warn %(asciidoctor: ERROR: #{@last_cursor.line_info}: table missing leading separator, recovering automatically) + logger.error message_with_context 'table missing leading separator; recovering automatically', source_location: Reader::Cursor.new(*@start_cursor_data) cellspec = {} repeat = 1 end else + cell_text = @buffer.strip + @buffer = '' cellspec = nil repeat = 1 - if @format == 'csv' - if !cell_text.empty? && cell_text.include?('"') - # this may not be perfect logic, but it hits the 99% - if cell_text.start_with?('"') && cell_text.end_with?('"') - # unquote - cell_text = cell_text[1...-1].strip + if @format == 'csv' && !cell_text.empty? && cell_text.include?('"') + # this may not be perfect logic, but it hits the 99% + if cell_text.start_with?('"') && cell_text.end_with?('"') + # unquote + if (cell_text = cell_text.slice(1, cell_text.length - 2)) + # trim whitespace and collapse escaped quotes + cell_text = cell_text.strip.squeeze('"') + else + logger.error message_with_context 'unclosed quote in CSV data; setting cell to empty', source_location: @reader.cursor_at_prev_line + cell_text = '' end - - # collapses escaped quotes - cell_text = cell_text.tr_s('"', '"') + else + # collapse escaped quotes + cell_text = cell_text.squeeze('"') end end end @@ -501,13 +634,13 @@ else # QUESTION is this right for cells that span columns? unless (column = @table.columns[@current_row.size]) - warn %(asciidoctor: ERROR: #{@last_cursor.line_info}: dropping cell because it exceeds specified number of columns) + logger.error message_with_context 'dropping cell because it exceeds specified number of columns', source_location: @reader.cursor_before_mark return end end - cell = Table::Cell.new(column, cell_text, cellspec, @last_cursor) - @last_cursor = @reader.cursor + cell = Table::Cell.new(column, cell_text, cellspec, cursor: @reader.cursor_before_mark) + @reader.mark unless !cell.rowspan || cell.rowspan == 1 activate_rowspan(cell.rowspan, (cell.colspan || 1)) end @@ -521,7 +654,9 @@ nil end - # Public: Close the row by adding it to the Table and resetting the row + private + + # Internal: Close the row by adding it to the Table and resetting the row # Array and counter variables. # # returns nothing @@ -537,24 +672,21 @@ nil end - # Public: Activate a rowspan. The rowspan Array is consulted when + # Internal: Activate a rowspan. The rowspan Array is consulted when # determining the effective number of cells in the current row. # # returns nothing def activate_rowspan(rowspan, colspan) - 1.upto(rowspan - 1).each {|i| - # longhand assignment used for Opal compatibility - @active_rowspans[i] = (@active_rowspans[i] || 0) + colspan - } + 1.upto(rowspan - 1) {|i| @active_rowspans[i] = (@active_rowspans[i] || 0) + colspan } nil end - # Public: Check whether we've met the number of effective columns for the current row. + # Internal: Check whether we've met the number of effective columns for the current row. def end_of_row? @colcount == -1 || effective_column_visits == @colcount end - # Public: Calculate the effective column visits, which consists of the number of + # Internal: Calculate the effective column visits, which consists of the number of # cells plus any active rowspans. 
def effective_column_visits @column_visits + @active_rowspans[0] @@ -565,6 +697,5 @@ def advance @linenum += 1 end - end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/timings.rb asciidoctor-2.0.10/lib/asciidoctor/timings.rb --- asciidoctor-1.5.5/lib/asciidoctor/timings.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/timings.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -# encoding: UTF-8 +# frozen_string_literal: true module Asciidoctor class Timings def initialize @@ -7,27 +7,44 @@ end def start key - @timers[key] = ::Time.now + @timers[key] = now end def record key - @log[key] = (::Time.now - (@timers.delete key)) + @log[key] = (now - (@timers.delete key)) + end + + def time *keys + time = keys.reduce(0) {|sum, key| sum + (@log[key] || 0) } + time > 0 ? time : nil + end + + def read + time :read + end + + def parse + time :parse end def read_parse - (time = (@log[:read] || 0) + (@log[:parse] || 0)) > 0 ? time : nil + time :read, :parse end def convert - @log[:convert] || 0 + time :convert end def read_parse_convert - (time = (@log[:read] || 0) + (@log[:parse] || 0) + (@log[:convert] || 0)) > 0 ? time : nil + time :read, :parse, :convert + end + + def write + time :write end def total - (time = (@log[:read] || 0) + (@log[:parse] || 0) + (@log[:convert] || 0) + (@log[:write] || 0)) > 0 ? time : nil + time :read, :parse, :convert, :write end def print_report to = $stdout, subject = nil @@ -36,5 +53,18 @@ to.puts %( Time to convert document: #{'%05.5f' % convert.to_f}) to.puts %( Total time (read, parse and convert): #{'%05.5f' % read_parse_convert.to_f}) end + + private + + if (::Process.const_defined? :CLOCK_MONOTONIC, false) && (defined? ::Process.clock_gettime) == 'method' + CLOCK_ID = ::Process::CLOCK_MONOTONIC + def now + ::Process.clock_gettime CLOCK_ID + end + else + def now + ::Time.now + end + end end end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/version.rb asciidoctor-2.0.10/lib/asciidoctor/version.rb --- asciidoctor-1.5.5/lib/asciidoctor/version.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/version.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,3 +1,4 @@ +# frozen_string_literal: true module Asciidoctor - VERSION = '1.5.5' + VERSION = '2.0.10' end diff -Nru asciidoctor-1.5.5/lib/asciidoctor/writer.rb asciidoctor-2.0.10/lib/asciidoctor/writer.rb --- asciidoctor-1.5.5/lib/asciidoctor/writer.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor/writer.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,30 @@ +# frozen_string_literal: true +module Asciidoctor +# A module that can be used to mix the {#write} method into a {Converter} implementation to allow the converter to +# control how the output is written to disk. +module Writer + # Public: Writes the output to the specified target file name or stream. + # + # output - The output String to write + # target - The String file name or stream object to which the output should be written. + # + # Returns nothing + def write output, target + if target.respond_to? :write + # ensure there's a trailing newline to be nice to terminals + target.write output.chomp + LF + else + # QUESTION shouldn't we ensure a trailing newline here too? 
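      # A hedged usage sketch for this mixin; MyConverter and out.html below are hypothetical:
      #
      #   class MyConverter < Asciidoctor::Converter::Base
      #     include Asciidoctor::Writer
      #   end
      #
      #   converter = MyConverter.new 'html5'
      #   converter.write '<p>done</p>', 'out.html' # String target: written to the file below
      #   converter.write '<p>done</p>', $stdout    # stream target: written above with a trailing LF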
+ ::File.write target, output, mode: FILE_WRITE_MODE + end + nil + end +end + +module VoidWriter + include Writer + + # Public: Does not write output + def write output, target; end +end +end diff -Nru asciidoctor-1.5.5/lib/asciidoctor.rb asciidoctor-2.0.10/lib/asciidoctor.rb --- asciidoctor-1.5.5/lib/asciidoctor.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/lib/asciidoctor.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,88 +1,83 @@ -# encoding: UTF-8 -RUBY_ENGINE = 'unknown' unless defined? RUBY_ENGINE -RUBY_ENGINE_OPAL = (RUBY_ENGINE == 'opal') -RUBY_ENGINE_JRUBY = (RUBY_ENGINE == 'jruby') -RUBY_MIN_VERSION_1_9 = (RUBY_VERSION >= '1.9') -RUBY_MIN_VERSION_2 = (RUBY_VERSION >= '2') - +# frozen_string_literal: true require 'set' -# NOTE RUBY_ENGINE == 'opal' conditional blocks are filtered by the Opal preprocessor +# NOTE RUBY_ENGINE == 'opal' conditional blocks like this are filtered by the Opal preprocessor if RUBY_ENGINE == 'opal' - # NOTE asciidoctor/opal_ext is supplied by the Asciidoctor.js build - require 'asciidoctor/opal_ext' + # this require is satisfied by the Asciidoctor.js build; it augments the Ruby environment for Asciidoctor.js + require 'asciidoctor/js' else autoload :Base64, 'base64' + require 'cgi/util' autoload :OpenURI, 'open-uri' + autoload :Pathname, 'pathname' autoload :StringScanner, 'strscan' + autoload :URI, 'uri' end -# ideally we should use require_relative instead of modifying the LOAD_PATH -$:.unshift File.dirname __FILE__ - -# Public: Methods for parsing AsciiDoc input files and converting documents -# using eRuby templates. -# -# AsciiDoc documents comprise a header followed by zero or more sections. -# Sections are composed of blocks of content. For example: -# -# = Doc Title +# Public: The main application interface (API) for Asciidoctor. This API provides methods to parse AsciiDoc content and +# convert it to various output formats using built-in or third-party converters or Tilt-supported templates. # -# == Section 1 +# An AsciiDoc document can be as simple as a single line of content, though it more commonly starts with a document +# header that declares the document title and document attribute definitions. The document header is then followed by +# zero or more section titles, optionally nested, to organize the paragraphs, blocks, lists, etc. of the document. # -# This is a paragraph block in the first section. +# By default, the processor converts the AsciiDoc document to HTML 5 using a built-in converter. However, this behavior +# can be changed by specifying a different backend (e.g., +docbook+). A backend is a keyword for an output format (e.g., +# DocBook). That keyword, in turn, is used to select a converter, which carries out the request to convert the document +# to that format. # -# == Section 2 +# In addition to this API, Asciidoctor also provides a command-line interface (CLI) named +asciidoctor+ for converting +# AsciiDoc content. See the provided man(ual) page for usage and options. # -# This section has a paragraph block and an olist block. +# Examples # -# . Item 1 -# . 
Item 2 +# # Convert an AsciiDoc file +# Asciidoctor.convert_file 'document.adoc', safe: :safe # -# Examples: +# # Convert an AsciiDoc string +# puts Asciidoctor.convert "I'm using *Asciidoctor* version {asciidoctor-version}.", safe: :safe # -# Use built-in converter: +# # Convert an AsciiDoc file using Tilt-supported templates +# Asciidoctor.convert_file 'document.adoc', safe: :safe, template_dir: '/path/to/templates' # -# Asciidoctor.convert_file 'sample.adoc' +# # Parse an AsciiDoc file into a document object +# doc = Asciidoctor.load_file 'document.adoc', safe: :safe # -# Use custom (Tilt-supported) templates: -# -# Asciidoctor.convert_file 'sample.adoc', :template_dir => 'path/to/templates' +# # Parse an AsciiDoc string into a document object +# doc = Asciidoctor.load "= Document Title\n\nfirst paragraph\n\nsecond paragraph", safe: :safe # module Asciidoctor - - # alias the RUBY_ENGINE constant inside the Asciidoctor namespace - RUBY_ENGINE = ::RUBY_ENGINE + # alias the RUBY_ENGINE constant inside the Asciidoctor namespace and define a precomputed alias for runtime + RUBY_ENGINE_OPAL = (RUBY_ENGINE = ::RUBY_ENGINE) == 'opal' module SafeMode - # A safe mode level that disables any of the security features enforced # by Asciidoctor (Ruby is still subject to its own restrictions). UNSAFE = 0; # A safe mode level that closely parallels safe mode in AsciiDoc. This value # prevents access to files which reside outside of the parent directory of - # the source file and disables any macro other than the include::[] macro. + # the source file and disables any macro other than the include::[] directive. SAFE = 1; # A safe mode level that disallows the document from setting attributes # that would affect the conversion of the document, in addition to all the - # security features of SafeMode::SAFE. For instance, this level disallows - # changing the backend or the source-highlighter using an attribute defined - # in the source document. This is the most fundamental level of security - # for server-side deployments (hence the name). + # security features of SafeMode::SAFE. For instance, this level forbids + # changing the backend or source-highlighter using an attribute defined + # in the source document header. This is the most fundamental level of + # security for server deployments (hence the name). SERVER = 10; # A safe mode level that disallows the document from attempting to read # files from the file system and including the contents of them into the # document, in additional to all the security features of SafeMode::SERVER. - # For instance, this level disallows use of the include::[] macro and the + # For instance, this level disallows use of the include::[] directive and the # embedding of binary content (data uri), stylesheets and JavaScripts # referenced by the document.(Asciidoctor and trusted extensions may still # be allowed to embed trusted content into the document). # # Since Asciidoctor is aiming for wide adoption, this level is the default - # and is recommended for server-side deployments. + # and is recommended for server deployments. SECURE = 20; # A planned safe mode level that disallows the use of passthrough macros and @@ -93,21 +88,34 @@ # enforced)! 
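# A brief sketch of selecting one of the safe mode levels documented above through the
# public API; the sample source string is illustrative only.
require 'asciidoctor'

Asciidoctor.convert 'Hello, _AsciiDoc_!', safe: :server                       # resolves to SafeMode::SERVER (10)
Asciidoctor.convert 'Hello, _AsciiDoc_!', safe: Asciidoctor::SafeMode::SECURE # explicit constant (20), the default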
#PARANOID = 100; + @names_by_value = {}.tap {|accum| (constants false).each {|sym| accum[const_get sym, false] = sym.to_s.downcase } } + + def self.value_for_name name + const_get name.upcase, false + end + + def self.name_for_value value + @names_by_value[value] + end + + def self.names + @names_by_value.values + end end # Flags to control compliance with the behavior of AsciiDoc module Compliance @keys = ::Set.new class << self - attr :keys - end + attr_reader :keys - # Defines a new compliance key and assigns an initial value. - def self.define key, value - instance_variable_set %(@#{key}), value - class << self; self; end.send :attr_accessor, key - @keys << key - nil + # Defines a new compliance key and assigns an initial value. + def define key, value + instance_variable_set %(@#{key}), value + singleton_class.send :attr_accessor, key + @keys << key + nil + end end # AsciiDoc terminates paragraphs adjacent to @@ -117,22 +125,14 @@ # Compliance value: true define :block_terminates_paragraph, true - # AsciiDoc does not treat paragraphs labeled with a verbatim style - # (literal, listing, source, verse) as verbatim + # AsciiDoc does not parse paragraphs with a verbatim style + # (i.e., literal, listing, source, verse) as verbatim content. # This options allows this behavior to be modified # Compliance value: false define :strict_verbatim_paragraphs, true - # NOT CURRENTLY USED - # AsciiDoc allows start and end delimiters around - # a block to be different lengths - # Enabling this option requires matching lengths - # Compliance value: false - #define :congruent_block_delimiters, true - - # AsciiDoc supports both single-line and underlined - # section titles. - # This option disables the underlined variant. + # AsciiDoc supports both atx (single-line) and setext (underlined) section titles. + # This option can be used to disable the setext variant. # Compliance value: true define :underline_style_section_titles, true @@ -142,7 +142,9 @@ define :unwrap_standalone_preamble, true # AsciiDoc drops lines that contain references to missing attributes. - # This behavior is not intuitive to most writers + # This behavior is not intuitive to most writers. + # Asciidoctor allows this behavior to be configured. + # Possible options are 'skip', 'drop', 'drop-line', and 'warn'. # Compliance value: 'drop-line' define :attribute_missing, 'skip' @@ -156,6 +158,11 @@ # Compliance value: false define :shorthand_property_syntax, true + # Asciidoctor will attempt to resolve the target of a cross reference by + # matching its reference text (reftext or title) (e.g., <
    >) + # Compliance value: false + define :natural_xrefs, true + # Asciidoctor will start counting at the following number # when creating a unique id when there is a conflict # Compliance value: 2 @@ -168,52 +175,47 @@ define :markdown_syntax, true end - # The absolute root path of the Asciidoctor RubyGem - ROOT_PATH = ::File.dirname ::File.dirname ::File.expand_path __FILE__ + # The absolute root directory of the Asciidoctor RubyGem + ROOT_DIR = ::File.dirname ::File.absolute_path __dir__ unless defined? ROOT_DIR - # The absolute lib path of the Asciidoctor RubyGem - LIB_PATH = ::File.join ROOT_PATH, 'lib' + # The absolute lib directory of the Asciidoctor RubyGem + LIB_DIR = ::File.join ROOT_DIR, 'lib' - # The absolute data path of the Asciidoctor RubyGem - DATA_PATH = ::File.join ROOT_PATH, 'data' + # The absolute data directory of the Asciidoctor RubyGem + DATA_DIR = ::File.join ROOT_DIR, 'data' # The user's home directory, as best we can determine it - # NOTE not using infix rescue for performance reasons, see: https://github.com/jruby/jruby/issues/1816 - begin - USER_HOME = ::Dir.home - rescue - USER_HOME = ::ENV['HOME'] || ::Dir.pwd - end + # IMPORTANT this rescue is required for running Asciidoctor on GitHub.com + USER_HOME = ::Dir.home rescue (::ENV['HOME'] || ::Dir.pwd) - # Flag to indicate whether encoding can be coerced to UTF-8 - # _All_ input data must be force encoded to UTF-8 if Encoding.default_external is *not* UTF-8 - # Addresses failures performing string operations that are reported as "invalid byte sequence in US-ASCII" - # Ruby 1.8 doesn't seem to experience this problem (perhaps because it isn't validating the encodings) - COERCE_ENCODING = !::RUBY_ENGINE_OPAL && ::RUBY_MIN_VERSION_1_9 + # The newline character used for output; stored in constant table as an optimization + LF = ?\n - # Flag to indicate whether encoding of external strings needs to be forced to UTF-8 - FORCE_ENCODING = COERCE_ENCODING && ::Encoding.default_external != ::Encoding::UTF_8 + # The null character to use for splitting attribute values + NULL = ?\0 - # Byte arrays for UTF-* Byte Order Marks - # hex escape sequence used for Ruby 1.8 compatibility - BOM_BYTES_UTF_8 = "\xef\xbb\xbf".bytes.to_a - BOM_BYTES_UTF_16LE = "\xff\xfe".bytes.to_a - BOM_BYTES_UTF_16BE = "\xfe\xff".bytes.to_a + # String for matching tab character + TAB = ?\t - # Flag to indicate that line length should be calculated using a unicode mode hint - FORCE_UNICODE_LINE_LENGTH = !::RUBY_MIN_VERSION_1_9 + # Maximum integer value for "boundless" operations; equal to MAX_SAFE_INTEGER in JavaScript + MAX_INT = 9007199254740991 - # Flag to indicate whether gsub can use a Hash to map matches to replacements - SUPPORTS_GSUB_RESULT_HASH = ::RUBY_MIN_VERSION_1_9 && !::RUBY_ENGINE_OPAL + # Alias UTF_8 encoding for convenience / speed + UTF_8 = ::Encoding::UTF_8 - # The endline character used for output; stored in constant table as an optimization - EOL = "\n" + # Byte arrays for UTF-* Byte Order Marks + BOM_BYTES_UTF_8 = [0xef, 0xbb, 0xbf] + BOM_BYTES_UTF_16LE = [0xff, 0xfe] + BOM_BYTES_UTF_16BE = [0xfe, 0xff] - # The null character to use for splitting attribute values - NULL = "\0" + # The mode to use when opening a file for reading + FILE_READ_MODE = RUBY_ENGINE_OPAL ? 'r' : 'rb:utf-8:utf-8' - # String for matching tab character - TAB = "\t" + # The mode to use when opening a URI for reading + URI_READ_MODE = FILE_READ_MODE + + # The mode to use when opening a file for writing + FILE_WRITE_MODE = RUBY_ENGINE_OPAL ? 
'w' : 'w:utf-8' # The default document type # Can influence markup generated by the converters @@ -228,7 +230,7 @@ # Pointers to the preferred version for a given backend. BACKEND_ALIASES = { - 'html' => 'html5', + 'html' => 'html5', 'docbook' => 'docbook5' } @@ -243,20 +245,22 @@ 'docbook' => '.xml', 'pdf' => '.pdf', 'epub' => '.epub', + 'manpage' => '.man', 'asciidoc' => '.adoc' } - # Set of file extensions recognized as AsciiDoc documents (stored as a truth hash) + # A map of file extensions that are recognized as AsciiDoc documents + # TODO .txt should be deprecated ASCIIDOC_EXTENSIONS = { - '.asciidoc' => true, '.adoc' => true, - '.ad' => true, + '.asciidoc' => true, '.asc' => true, + '.ad' => true, # TODO .txt should be deprecated '.txt' => true } - SECTION_LEVELS = { + SETEXT_SECTION_LEVELS = { '=' => 0, '-' => 1, '~' => 2, @@ -266,37 +270,47 @@ ADMONITION_STYLES = ['NOTE', 'TIP', 'IMPORTANT', 'WARNING', 'CAUTION'].to_set - PARAGRAPH_STYLES = ['comment', 'example', 'literal', 'listing', 'normal', 'pass', 'quote', 'sidebar', 'source', 'verse', 'abstract', 'partintro'].to_set + ADMONITION_STYLE_HEADS = ::Set.new.tap {|accum| ADMONITION_STYLES.each {|s| accum << s.chr } } + + PARAGRAPH_STYLES = ['comment', 'example', 'literal', 'listing', 'normal', 'open', 'pass', 'quote', 'sidebar', 'source', 'verse', 'abstract', 'partintro'].to_set VERBATIM_STYLES = ['literal', 'listing', 'source', 'verse'].to_set DELIMITED_BLOCKS = { - '--' => [:open, ['comment', 'example', 'literal', 'listing', 'pass', 'quote', 'sidebar', 'source', 'verse', 'admonition', 'abstract', 'partintro'].to_set], + '--' => [:open, ['comment', 'example', 'literal', 'listing', 'pass', 'quote', 'sidebar', 'source', 'verse', 'admonition', 'abstract', 'partintro'].to_set], '----' => [:listing, ['literal', 'source'].to_set], '....' 
=> [:literal, ['listing', 'source'].to_set], '====' => [:example, ['admonition'].to_set], '****' => [:sidebar, ::Set.new], '____' => [:quote, ['verse'].to_set], - '""' => [:quote, ['verse'].to_set], '++++' => [:pass, ['stem', 'latexmath', 'asciimath'].to_set], '|===' => [:table, ::Set.new], ',===' => [:table, ::Set.new], ':===' => [:table, ::Set.new], '!===' => [:table, ::Set.new], '////' => [:comment, ::Set.new], - '```' => [:fenced_code, ::Set.new] + '```' => [:fenced_code, ::Set.new] } - DELIMITED_BLOCK_LEADERS = DELIMITED_BLOCKS.keys.map {|key| key[0..1] }.to_set + DELIMITED_BLOCK_HEADS = {}.tap {|accum| DELIMITED_BLOCKS.each_key {|k| accum[k.slice 0, 2] = true } } + DELIMITED_BLOCK_TAILS = {}.tap {|accum| DELIMITED_BLOCKS.each_key {|k| accum[k] = k[k.length - 1] if k.length == 4 } } + + # NOTE the 'figure' key as a string is historical and used by image blocks + CAPTION_ATTR_NAMES = { example: 'example-caption', 'figure' => 'figure-caption', listing: 'listing-caption', table: 'table-caption' } - LAYOUT_BREAK_LINES = { + LAYOUT_BREAK_CHARS = { '\'' => :thematic_break, - '-' => :thematic_break, - '*' => :thematic_break, - '_' => :thematic_break, - '<' => :page_break + '<' => :page_break } + MARKDOWN_THEMATIC_BREAK_CHARS = { + '-' => :thematic_break, + '*' => :thematic_break, + '_' => :thematic_break + } + + HYBRID_LAYOUT_BREAK_CHARS = LAYOUT_BREAK_CHARS.merge MARKDOWN_THEMATIC_BREAK_CHARS + #LIST_CONTEXTS = [:ulist, :olist, :dlist, :colist] NESTABLE_LIST_CONTEXTS = [:ulist, :olist, :dlist] @@ -305,925 +319,148 @@ ORDERED_LIST_STYLES = [:arabic, :loweralpha, :lowerroman, :upperalpha, :upperroman] #, :lowergreek] ORDERED_LIST_KEYWORDS = { + #'arabic' => '1', + #'decimal' => '1', 'loweralpha' => 'a', 'lowerroman' => 'i', + #'lowergreek' => 'a', 'upperalpha' => 'A', 'upperroman' => 'I' - #'lowergreek' => 'a' - #'arabic' => '1' - #'decimal' => '1' } + ATTR_REF_HEAD = '{' + LIST_CONTINUATION = '+' - # NOTE AsciiDoc Python recognizes both a preceding TAB and a space - LINE_BREAK = ' +' + # NOTE AsciiDoc Python allows + to be preceded by TAB; Asciidoctor does not + HARD_LINE_BREAK = ' +' LINE_CONTINUATION = ' \\' LINE_CONTINUATION_LEGACY = ' +' BLOCK_MATH_DELIMITERS = { - :asciimath => ['\\$', '\\$'], - :latexmath => ['\\[', '\\]'], + asciimath: ['\$', '\$'], + latexmath: ['\[', '\]'], } INLINE_MATH_DELIMITERS = { - :asciimath => ['\\$', '\\$'], - :latexmath => ['\\(', '\\)'], + asciimath: ['\$', '\$'], + latexmath: ['\(', '\)'], } - # attributes which be changed within the content of the document (but not - # header) because it has semantic meaning; ex. sectnums - FLEXIBLE_ATTRIBUTES = %w(sectnums) + (STEM_TYPE_ALIASES = { + 'latexmath' => 'latexmath', + 'latex' => 'latexmath', + 'tex' => 'latexmath' + }).default = 'asciimath' - # A collection of regular expressions used by the parser. - # - # NOTE: The following pattern, which appears frequently, captures the - # contents between square brackets, ignoring escaped closing brackets - # (closing brackets prefixed with a backslash '\' character) - # - # Pattern: (?:\[((?:\\\]|[^\]])*?)\]) - # Matches: [enclosed text here] or [enclosed [text\] here] - # - #(pseudo)module Rx - - ## Regular expression character classes (to ensure regexp compatibility between Ruby and JavaScript) - ## CC stands for "character class", CG stands for "character class group" - - # NOTE \w matches only the ASCII word characters, whereas [[:word:]] or \p{Word} matches any character in the Unicode word category. 
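# A tiny illustration of the point in the NOTE above: \p{Word} (used on Ruby) matches
# non-ASCII word characters that the ASCII-only class used for JavaScript would miss.
# The pattern and sample string are made up for this sketch.
unicode_rx = /^\p{Word}+$/
ascii_rx   = /^[a-zA-Z0-9_]+$/
'Straße'.match? unicode_rx # => true
'Straße'.match? ascii_rx   # => false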
- - # character classes for the Regexp engine(s) in JavaScript - if RUBY_ENGINE == 'opal' - CC_ALPHA = 'a-zA-Z' - CG_ALPHA = '[a-zA-Z]' - CC_ALNUM = 'a-zA-Z0-9' - CG_ALNUM = '[a-zA-Z0-9]' - CG_BLANK = '[ \\t]' - CC_EOL = '(?=\\n|$)' - CG_GRAPH = '[\\x21-\\x7E]' # non-blank character - CC_ALL = '[\s\S]' # any character, including newlines (alternatively, [^]) - CC_WORD = 'a-zA-Z0-9_' - CG_WORD = '[a-zA-Z0-9_]' - # character classes for the Regexp engine in Ruby >= 2 (Ruby 1.9 supports \p{} but has problems w/ encoding) - elsif ::RUBY_MIN_VERSION_2 - CC_ALPHA = CG_ALPHA = '\p{Alpha}' - CC_ALNUM = CG_ALNUM = '\p{Alnum}' - CC_ALL = '.' - CG_BLANK = '\p{Blank}' - CC_EOL = '$' - CG_GRAPH = '\p{Graph}' - CC_WORD = CG_WORD = '\p{Word}' - # character classes for the Regexp engine in Ruby < 2 - else - CC_ALPHA = '[:alpha:]' - CG_ALPHA = '[[:alpha:]]' - CC_ALL = '.' - CC_ALNUM = '[:alnum:]' - CG_ALNUM = '[[:alnum:]]' - CG_BLANK = '[[:blank:]]' - CC_EOL = '$' - CG_GRAPH = '[[:graph:]]' # non-blank character - if ::RUBY_MIN_VERSION_1_9 - CC_WORD = '[:word:]' - CG_WORD = '[[:word:]]' - else - # NOTE Ruby 1.8 cannot match word characters beyond the ASCII range; if you need this feature, upgrade! - CC_WORD = '[:alnum:]_' - CG_WORD = '[[:alnum:]_]' - end - end + FONT_AWESOME_VERSION = '4.7.0' - ## Document header + HIGHLIGHT_JS_VERSION = '9.15.6' - # Matches the author info line immediately following the document title. - # - # Examples - # - # Doc Writer - # Mary_Sue Brontë - # - AuthorInfoLineRx = /^(#{CG_WORD}[#{CC_WORD}\-'.]*)(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +<([^>]+)>)?$/ + MATHJAX_VERSION = '2.7.5' - # Matches the revision info line, which appears immediately following - # the author info line beneath the document title. - # - # Examples - # - # v1.0 - # 2013-01-01 - # v1.0, 2013-01-01: Ring in the new year release - # 1.0, Jan 01, 2013 - # - RevisionInfoLineRx = /^(?:\D*(.*?),)?(?:\s*(?!:)(.*?))(?:\s*(?!^):\s*(.*))?$/ - - # Matches the title and volnum in the manpage doctype. - # - # Examples - # - # = asciidoctor ( 1 ) - # - ManpageTitleVolnumRx = /^(.*)\((.*)\)$/ - - # Matches the name and purpose in the manpage doctype. - # - # Examples - # - # asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats - # - ManpageNamePurposeRx = /^(.*?)#{CG_BLANK}+-#{CG_BLANK}+(.*)$/ - - ## Preprocessor directives - - # Matches a conditional preprocessor directive (e.g., ifdef, ifndef, ifeval and endif). - # - # Examples - # - # ifdef::basebackend-html[] - # ifndef::theme[] - # ifeval::["{asciidoctor-version}" >= "0.1.0"] - # ifdef::asciidoctor[Asciidoctor!] - # endif::theme[] - # endif::basebackend-html[] - # endif::[] - # - ConditionalDirectiveRx = /^\\?(ifdef|ifndef|ifeval|endif)::(\S*?(?:([,\+])\S+?)?)\[(.+)?\]$/ - - # Matches a restricted (read as safe) eval expression. - # - # Examples - # - # "{asciidoctor-version}" >= "0.1.0" - # - EvalExpressionRx = /^(\S.*?)#{CG_BLANK}*(==|!=|<=|>=|<|>)#{CG_BLANK}*(\S.*)$/ - - # Matches an include preprocessor directive. - # - # Examples - # - # include::chapter1.ad[] - # include::example.txt[lines=1;2;5..10] - # - IncludeDirectiveRx = /^\\?include::([^\[]+)\[(.*?)\]$/ - - # Matches a trailing tag directive in an include file. - # - # Examples - # - # // tag::try-catch[] - # try { - # someMethod(); - # catch (Exception e) { - # log(e); - # } - # // end::try-catch[] - TagDirectiveRx = /\b(?:tag|end)::\S+\[\]$/ - - ## Attribute entries and references - - # Matches a document attribute entry. 
- # - # Examples - # - # :foo: bar - # :First Name: Dan - # :sectnums!: - # :!toc: - # :long-entry: Attribute value lines ending in ' +' - # are joined together as a single value, - # collapsing the line breaks and indentation to - # a single space. - # - AttributeEntryRx = /^:(!?\w.*?):(?:#{CG_BLANK}+(.*))?$/ - - # Matches invalid characters in an attribute name. - InvalidAttributeNameCharsRx = /[^\w\-]/ - - # Matches the pass inline macro allowed in value of attribute assignment. - # - # Examples - # - # pass:[text] - # - AttributeEntryPassMacroRx = /^pass:([a-z,]*)\[(.*)\]$/ - - # Matches an inline attribute reference. - # - # Examples - # - # {foo} - # {counter:pcount:1} - # {set:foo:bar} - # {set:name!} - # - AttributeReferenceRx = /(\\)?\{((set|counter2?):.+?|\w+(?:[\-]\w+)*)(\\)?\}/ - - ## Paragraphs and delimited blocks - - # Matches an anchor (i.e., id + optional reference text) on a line above a block. - # - # Examples - # - # [[idname]] - # [[idname,Reference Text]] - # - BlockAnchorRx = /^\[\[(?:|([#{CC_ALPHA}:_][#{CC_WORD}:.-]*)(?:,#{CG_BLANK}*(\S.*))?)\]\]$/ - - # Matches an attribute list above a block element. - # - # Examples - # - # # strictly positional - # [quote, Adam Smith, Wealth of Nations] - # - # # name/value pairs - # [NOTE, caption="Good to know"] - # - # # as attribute reference - # [{lead}] - # - BlockAttributeListRx = /^\[(|#{CG_BLANK}*[#{CC_WORD}\{,.#"'%].*)\]$/ - - # A combined pattern that matches either a block anchor or a block attribute list. - # - # TODO this one gets hit a lot, should be optimized as much as possible - BlockAttributeLineRx = /^\[(|#{CG_BLANK}*[#{CC_WORD}\{,.#"'%].*|\[(?:|[#{CC_ALPHA}:_][#{CC_WORD}:.-]*(?:,#{CG_BLANK}*\S.*)?)\])\]$/ - - # Matches a title above a block. - # - # Examples - # - # .Title goes here - # - BlockTitleRx = /^\.([^\s.].*)$/ - - # Matches an admonition label at the start of a paragraph. - # - # Examples - # - # NOTE: Just a little note. - # TIP: Don't forget! - # - AdmonitionParagraphRx = /^(#{ADMONITION_STYLES.to_a * '|'}):#{CG_BLANK}/ - - # Matches a literal paragraph, which is a line of text preceded by at least one space. - # - # Examples - # - # Foo - # Foo - LiteralParagraphRx = /^(#{CG_BLANK}+.*)$/ - - # Matches a comment block. - # - # Examples - # - # //// - # This is a block comment. - # It can span one or more lines. - # //// - CommentBlockRx = %r{^/{4,}$} - - # Matches a comment line. - # - # Examples - # - # // an then whatever - # - CommentLineRx = %r{^//(?:[^/]|$)} - - ## Section titles - - # Matches a single-line (Atx-style) section title. - # - # Examples - # - # == Foo - # # ^ a level 1 (h2) section title - # - # == Foo == - # # ^ also a level 1 (h2) section title - # - # match[1] is the delimiter, whose length determines the level - # match[2] is the title itself - # match[3] is an inline anchor, which becomes the section id - AtxSectionRx = /^((?:=|#){1,6})#{CG_BLANK}+(\S.*?)(?:#{CG_BLANK}+\1)?$/ - - # Matches the restricted section name for a two-line (Setext-style) section title. - # The name cannot begin with a dot and has at least one alphanumeric character. - SetextSectionTitleRx = /^((?=.*#{CG_WORD}+.*)[^.].*?)$/ - - # Matches the underline in a two-line (Setext-style) section title. - # - # Examples - # - # ====== || ------ || ~~~~~~ || ^^^^^^ || ++++++ - # - SetextSectionLineRx = /^(?:=|-|~|\^|\+)+$/ - - # Matches an anchor (i.e., id + optional reference text) inside a section title. 
- # - # Examples - # - # Section Title [[idname]] - # Section Title [[idname,Reference Text]] - # - InlineSectionAnchorRx = /^(.*?)#{CG_BLANK}+(\\)?\[\[([#{CC_ALPHA}:_][#{CC_WORD}:.-]*)(?:,#{CG_BLANK}*(\S.*?))?\]\]$/ - - # Matches invalid characters in a section id. - InvalidSectionIdCharsRx = /&(?:[a-zA-Z]{2,}|#\d{2,6}|#x[a-fA-F0-9]{2,5});|[^#{CC_WORD}]+?/ - - # Matches the block style used to designate a section title as a floating title. - # - # Examples - # - # [float] - # = Floating Title - # - FloatingTitleStyleRx = /^(?:float|discrete)\b/ - - ## Lists - - # Detects the start of any list item. - # - # NOTE we only have to check as far as the blank character because we know it means non-whitespace follows. - AnyListRx = /^(?:#{CG_BLANK}*(?:-|([*.\u2022])\1{0,4}|\d+\.|[a-zA-Z]\.|[IVXivx]+\))#{CG_BLANK}|#{CG_BLANK}*.*?(?::{2,4}|;;)(?:$|#{CG_BLANK})|#{CG_BLANK})/ - - # Matches an unordered list item (one level for hyphens, up to 5 levels for asterisks). - # - # Examples - # - # * Foo - # - Foo - # - # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces - # NOTE I want to use (-|([*\u2022])\2{0,4}) but breaks the parser since it relies on fixed match positions - UnorderedListRx = /^#{CG_BLANK}*(-|\*{1,5}|\u2022{1,5})#{CG_BLANK}+(.*)$/ - - # Matches an ordered list item (explicit numbering or up to 5 consecutive dots). - # - # Examples - # - # . Foo - # .. Foo - # 1. Foo (arabic, default) - # a. Foo (loweralpha) - # A. Foo (upperalpha) - # i. Foo (lowerroman) - # I. Foo (upperroman) - # - # NOTE leading space match is not always necessary, but is used for list reader - # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces - OrderedListRx = /^#{CG_BLANK}*(\.{1,5}|\d+\.|[a-zA-Z]\.|[IVXivx]+\))#{CG_BLANK}+(.*)$/ - - # Matches the ordinals for each type of ordered list. - OrderedListMarkerRxMap = { - :arabic => /\d+[.>]/, - :loweralpha => /[a-z]\./, - :lowerroman => /[ivx]+\)/, - :upperalpha => /[A-Z]\./, - :upperroman => /[IVX]+\)/ - #:lowergreek => /[a-z]\]/ - } - - # Matches a description list entry. - # - # Examples - # - # foo:: - # foo::: - # foo:::: - # foo;; - # - # # the term can be followed by a description on the same line... - # - # foo:: That which precedes 'bar' (see also, <>) - # - # # ...or on a separate line (optionally indented) - # - # foo:: - # That which precedes 'bar' (see also, <>) - # - # # the term or description may be an attribute reference - # - # {foo_term}:: {foo_def} - # - # NOTE negative match for comment line is intentional since that isn't handled when looking for next list item - # TODO check for line comment when scanning lines instead of in regex - # - DescriptionListRx = /^(?!\/\/)#{CG_BLANK}*(.*?)(:{2,4}|;;)(?:#{CG_BLANK}+(.*))?$/ - - # Matches a sibling description list item (which does not include the type in the key). - DescriptionListSiblingRx = { - # (?:.*?[^:])? - a non-capturing group which grabs longest sequence of characters that doesn't end w/ colon - '::' => /^(?!\/\/)#{CG_BLANK}*((?:.*[^:])?)(::)(?:#{CG_BLANK}+(.*))?$/, - ':::' => /^(?!\/\/)#{CG_BLANK}*((?:.*[^:])?)(:::)(?:#{CG_BLANK}+(.*))?$/, - '::::' => /^(?!\/\/)#{CG_BLANK}*((?:.*[^:])?)(::::)(?:#{CG_BLANK}+(.*))?$/, - ';;' => /^(?!\/\/)#{CG_BLANK}*(.*)(;;)(?:#{CG_BLANK}+(.*))?$/ - } - - # Matches a callout list item. 
- # - # Examples - # - # <1> Foo - # - # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces - CalloutListRx = /^#{CG_BLANK}+(.*)$/ - - # Matches a callout reference inside literal text. - # - # Examples - # <1> (optionally prefixed by //, #, -- or ;; line comment chars) - # <1> <2> (multiple callouts on one line) - # (for XML-based languages) - # - # NOTE extract regexps are applied line-by-line, so we can use $ as end-of-line char - CalloutExtractRx = /(?:(?:\/\/|#|--|;;) ?)?(\\)?(?=(?: ?\\?)*$)/ - CalloutExtractRxt = '(\\\\)?<()(\\d+)>(?=(?: ?\\\\?<\\d+>)*$)' - # NOTE special characters have not been replaced when scanning - CalloutQuickScanRx = /\\?(?=(?: ?\\?)*#{CC_EOL})/ - # NOTE special characters have already been replaced when converting to an SGML format - CalloutSourceRx = /(?:(?:\/\/|#|--|;;) ?)?(\\)?<!?(--|)(\d+)\2>(?=(?: ?\\?<!?\2\d+\2>)*#{CC_EOL})/ - CalloutSourceRxt = "(\\\\)?<()(\\d+)>(?=(?: ?\\\\?<\\d+>)*#{CC_EOL})" - - # A Hash of regexps for lists used for dynamic access. - ListRxMap = { - :ulist => UnorderedListRx, - :olist => OrderedListRx, - :dlist => DescriptionListRx, - :colist => CalloutListRx - } - - ## Tables - - # Parses the column spec (i.e., colspec) for a table. - # - # Examples - # - # 1*h,2*,^3e - # - ColumnSpecRx = /^(?:(\d+)\*)?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?(\d+%?)?([a-z])?$/ - - # Parses the start and end of a cell spec (i.e., cellspec) for a table. - # - # Examples - # - # 2.3+<.>m - # - # FIXME use step-wise scan (or treetop) rather than this mega-regexp - CellSpecStartRx = /^#{CG_BLANK}*(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ - CellSpecEndRx = /#{CG_BLANK}+(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ - - # Block macros - - # Matches the general block macro pattern. - # - # Examples - # - # gist::123456[] - # - #-- - # NOTE we've relaxed the match for target to accomodate the short format (e.g., name::[attrlist]) - GenericBlockMacroRx = /^(#{CG_WORD}+)::(\S*?)\[((?:\\\]|[^\]])*?)\]$/ - - # Matches an image, video or audio block macro. - # - # Examples - # - # image::filename.png[Caption] - # video::http://youtube.com/12345[Cats vs Dogs] - # - MediaBlockMacroRx = /^(image|video|audio)::(\S+?)\[((?:\\\]|[^\]])*?)\]$/ - - # Matches the TOC block macro. - # - # Examples - # - # toc::[] - # toc::[levels=2] - # - TocBlockMacroRx = /^toc::\[(.*?)\]$/ - - ## Inline macros - - # Matches an anchor (i.e., id + optional reference text) in the flow of text. - # - # Examples - # - # [[idname]] - # [[idname,Reference Text]] - # anchor:idname[] - # anchor:idname[Reference Text] - # - InlineAnchorRx = /\\?(?:\[\[([#{CC_ALPHA}:_][#{CC_WORD}:.-]*)(?:,#{CG_BLANK}*(\S.*?))?\]\]|anchor:(\S+)\[(.*?[^\\])?\])/ - - # Matches a bibliography anchor anywhere inline. - # - # Examples - # - # [[[Foo]]] - # - InlineBiblioAnchorRx = /\\?\[\[\[([#{CC_WORD}:][#{CC_WORD}:.-]*?)\]\]\]/ - - # Matches an inline e-mail address. - # - # doc.writer@example.com - # - EmailInlineMacroRx = /([\\>:\/])?#{CG_WORD}[#{CC_WORD}.%+-]*@#{CG_ALNUM}[#{CC_ALNUM}.-]*\.#{CG_ALPHA}{2,4}\b/ - - # Matches an inline footnote macro, which is allowed to span multiple lines. - # - # Examples - # footnote:[text] - # footnoteref:[id,text] - # footnoteref:[id] - # - FootnoteInlineMacroRx = /\\?(footnote(?:ref)?):\[(#{CC_ALL}*?[^\\])\]/m - - # Matches an image or icon inline macro. 
- # - # Examples - # - # image:filename.png[Alt Text] - # image:http://example.com/images/filename.png[Alt Text] - # image:filename.png[More [Alt\] Text] (alt text becomes "More [Alt] Text") - # icon:github[large] - # - ImageInlineMacroRx = /\\?(?:image|icon):([^:\[][^\[]*)\[((?:\\\]|[^\]])*?)\]/ - - # Matches an indexterm inline macro, which may span multiple lines. - # - # Examples - # - # indexterm:[Tigers,Big cats] - # (((Tigers,Big cats))) - # indexterm2:[Tigers] - # ((Tigers)) - # - IndextermInlineMacroRx = /\\?(?:(indexterm2?):\[(#{CC_ALL}*?[^\\])\]|\(\((#{CC_ALL}+?)\)\)(?!\)))/m - - # Matches either the kbd or btn inline macro. - # - # Examples - # - # kbd:[F3] - # kbd:[Ctrl+Shift+T] - # kbd:[Ctrl+\]] - # kbd:[Ctrl,T] - # btn:[Save] - # - KbdBtnInlineMacroRx = /\\?(?:kbd|btn):\[((?:\\\]|[^\]])+?)\]/ - - # Matches the delimiter used for kbd value. - # - # Examples - # - # Ctrl + Alt+T - # Ctrl,T - # - KbdDelimiterRx = /(?:\+|,)(?=#{CG_BLANK}*[^\1])/ - - # Matches an implicit link and some of the link inline macro. - # - # Examples - # - # http://github.com - # http://github.com[GitHub] - # - # FIXME revisit! the main issue is we need different rules for implicit vs explicit - LinkInlineRx = %r{(^|link:|<|[\s>\(\)\[\];])(\\?(?:https?|file|ftp|irc)://[^\s\[\]<]*[^\s.,\[\]<])(?:\[((?:\\\]|[^\]])*?)\])?} - - # Match a link or e-mail inline macro. - # - # Examples - # - # link:path[label] - # mailto:doc.writer@example.com[] - # - LinkInlineMacroRx = /\\?(?:link|mailto):([^\s\[]+)(?:\[((?:\\\]|[^\]])*?)\])/ - - # Matches a stem (and alternatives, asciimath and latexmath) inline macro, which may span multiple lines. - # - # Examples - # - # stem:[x != 0] - # asciimath:[x != 0] - # latexmath:[\sqrt{4} = 2] - # - StemInlineMacroRx = /\\?(stem|(?:latex|ascii)math):([a-z,]*)\[(#{CC_ALL}*?[^\\])\]/m - - # Matches a menu inline macro. - # - # Examples - # - # menu:File[New...] - # menu:View[Page Style > No Style] - # menu:View[Page Style, No Style] - # - MenuInlineMacroRx = /\\?menu:(#{CG_WORD}|#{CG_WORD}.*?\S)\[#{CG_BLANK}*(.+?)?\]/ - - # Matches an implicit menu inline macro. - # - # Examples - # - # "File > New..." - # - MenuInlineRx = /\\?"(#{CG_WORD}[^"]*?#{CG_BLANK}*>#{CG_BLANK}*[^" \t][^"]*)"/ - - # Matches an inline passthrough value, which may span multiple lines. - # - # Examples - # - # +text+ - # `text` (compat) - # - # NOTE we always capture the attributes so we know when to use compatible (i.e., legacy) behavior - PassInlineRx = { - false => ['+', '`', /(^|[^#{CC_WORD};:])(?:\[([^\]]+?)\])?(\\?(\+|`)(\S|\S#{CC_ALL}*?\S)\4)(?!#{CG_WORD})/m], - true => ['`', nil, /(^|[^`#{CC_WORD}])(?:\[([^\]]+?)\])?(\\?(`)([^`\s]|[^`\s]#{CC_ALL}*?\S)\4)(?![`#{CC_WORD}])/m] - } - - # Matches several variants of the passthrough inline macro, which may span multiple lines. - # - # Examples - # - # +++text+++ - # $$text$$ - # pass:quotes[text] - # - PassInlineMacroRx = /(?:(?:(\\?)\[([^\]]+?)\])?(\\{0,2})(\+{2,3}|\${2})(#{CC_ALL}*?)\4|(\\?)pass:([a-z,]*)\[(#{CC_ALL}*?[^\\])\])/m - - # Matches an xref (i.e., cross-reference) inline macro, which may span multiple lines. - # - # Examples - # - # <> - # xref:id[reftext] - # - # NOTE special characters have already been escaped, hence the entity references - XrefInlineMacroRx = /\\?(?:<<([#{CC_WORD}":.\/]#{CC_ALL}*?)>>|xref:([#{CC_WORD}":.\/]#{CC_ALL}*?)\[(#{CC_ALL}*?)\])/m - - ## Layout - - # Matches a trailing + preceded by at least one space character, - # which forces a hard line break (
    tag in HTML output). - # - # Examples - # - # Humpty Dumpty sat on a wall, + - # Humpty Dumpty had a great fall. - # - if RUBY_ENGINE == 'opal' - # NOTE JavaScript only treats ^ and $ as line boundaries in multiline regexp; . won't match newlines - LineBreakRx = /^(.*)[ \t]\+$/m - else - LineBreakRx = /^(.*)[[:blank:]]\+$/ - end - - # Matches an AsciiDoc horizontal rule or AsciiDoc page break. - # - # Examples - # - # ''' (horizontal rule) - # <<< (page break) - # - LayoutBreakLineRx = /^('|<){3,}$/ - - # Matches an AsciiDoc or Markdown horizontal rule or AsciiDoc page break. - # - # Examples - # - # ''' or ' ' ' (horizontal rule) - # --- or - - - (horizontal rule) - # *** or * * * (horizontal rule) - # <<< (page break) - # - LayoutBreakLinePlusRx = /^(?:'|<){3,}$|^ {0,3}([-\*_])( *)\1\2\1$/ - - ## General - - # Matches a blank line. - # - # NOTE allows for empty space in line as it could be left by the template engine - BlankLineRx = /^#{CG_BLANK}*\n/ - - # Matches a comma or semi-colon delimiter. - # - # Examples - # - # one,two - # three;four - # - DataDelimiterRx = /,|;/ - - # Matches a single-line of text enclosed in double quotes, capturing the quote char and text. - # - # Examples - # - # "Who goes there?" - # - DoubleQuotedRx = /^("|)(.*)\1$/ - - # Matches multiple lines of text enclosed in double quotes, capturing the quote char and text. - # - # Examples - # - # "I am a run-on sentence and I like - # to take up multiple lines and I - # still want to be matched." - # - DoubleQuotedMultiRx = /^("|)(#{CC_ALL}*)\1$/m - - # Matches one or more consecutive digits at the end of a line. - # - # Examples - # - # docbook45 - # html5 - # - TrailingDigitsRx = /\d+$/ - - # Matches a space escaped by a backslash. - # - # Examples - # - # one\ two\ three - # - EscapedSpaceRx = /\\(#{CG_BLANK})/ - - # Matches a space delimiter that's not escaped. - # - # Examples - # - # one two three four - # - SpaceDelimiterRx = /([^\\])#{CG_BLANK}+/ - - # Matches a + or - modifier in a subs list - # - SubModifierSniffRx = /[+-]/ - - # Matches any character with multibyte support explicitly enabled (length of multibyte char = 1) - # - # NOTE If necessary to hide use of the language modifier (u) from JavaScript, use (Regexp.new '.', false, 'u') - # - UnicodeCharScanRx = unless RUBY_ENGINE == 'opal' - FORCE_UNICODE_LINE_LENGTH ? /./u : nil - end - - # Detects strings that resemble URIs. - # - # Examples - # http://domain - # https://domain - # file:///path - # data:info - # - # not c:/sample.adoc or c:\sample.adoc - # - UriSniffRx = %r{^#{CG_ALPHA}[#{CC_ALNUM}.+-]+:/{0,2}} - - # Detects the end of an implicit URI in the text - # - # Examples - # - # (http://google.com) - # >http://google.com< - # (See http://google.com): - # - UriTerminator = /[);:]$/ - - # Detects XML tags - XmlSanitizeRx = /<[^>]+>/ - - # Unused - - # Detects any fenced block delimiter, including: - # listing, literal, example, sidebar, quote, passthrough, table and fenced code - # Does not match open blocks or air quotes - # TIP position the most common blocks towards the front of the pattern - #BlockDelimiterRx = %r{^(?:(?:-|\.|=|\*|_|\+|/){4,}|[\|,;!]={3,}|(?:`|~){3,}.*)$} - - # Matches an escaped single quote within a word - # - # Examples - # - # Here\'s Johnny! 
- # - #EscapedSingleQuoteRx = /(#{CG_WORD})\\'(#{CG_WORD})/ - # an alternative if our backend generates single-quoted html/xml attributes - #EscapedSingleQuoteRx = /(#{CG_WORD}|=)\\'(#{CG_WORD})/ - - # Matches whitespace at the beginning of the line - #LeadingSpacesRx = /^(#{CG_BLANK}*)/ - - # Matches parent directory references at the beginning of a path - #LeadingParentDirsRx = /^(?:\.\.\/)*/ - - #StripLineWise = /\A(?:\s*\n)?(#{CC_ALL}*?)\s*\z/m - #end + # attributes which be changed throughout the flow of the document (e.g., sectnums) + FLEXIBLE_ATTRIBUTES = ['sectnums'] INTRINSIC_ATTRIBUTES = { - 'startsb' => '[', - 'endsb' => ']', - 'vbar' => '|', - 'caret' => '^', - 'asterisk' => '*', - 'tilde' => '~', - 'plus' => '+', - 'backslash' => '\\', - 'backtick' => '`', - 'blank' => '', - 'empty' => '', - 'sp' => ' ', + 'startsb' => '[', + 'endsb' => ']', + 'vbar' => '|', + 'caret' => '^', + 'asterisk' => '*', + 'tilde' => '~', + 'plus' => '+', + 'backslash' => '\\', + 'backtick' => '`', + 'blank' => '', + 'empty' => '', + 'sp' => ' ', 'two-colons' => '::', 'two-semicolons' => ';;', - 'nbsp' => ' ', - 'deg' => '°', - 'zwsp' => '​', - 'quot' => '"', - 'apos' => ''', - 'lsquo' => '‘', - 'rsquo' => '’', - 'ldquo' => '“', - 'rdquo' => '”', - 'wj' => '⁠', - 'brvbar' => '¦', - 'cpp' => 'C++', - 'amp' => '&', - 'lt' => '<', - 'gt' => '>' - } - - # unconstrained quotes:: can appear anywhere - # constrained quotes:: must be bordered by non-word characters - # NOTE these substitutions are processed in the order they appear here and - # the order in which they are replaced is important - quote_subs = [ - # **strong** - [:strong, :unconstrained, /\\?(?:\[([^\]]+?)\])?\*\*(#{CC_ALL}+?)\*\*/m], - - # *strong* - [:strong, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?\*(\S|\S#{CC_ALL}*?\S)\*(?!#{CG_WORD})/m], - - # "`double-quoted`" - [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?"`(\S|\S#{CC_ALL}*?\S)`"(?!#{CG_WORD})/m], - - # '`single-quoted`' - [:single, :constrained, /(^|[^#{CC_WORD};:`}])(?:\[([^\]]+?)\])?'`(\S|\S#{CC_ALL}*?\S)`'(?!#{CG_WORD})/m], - - # ``monospaced`` - [:monospaced, :unconstrained, /\\?(?:\[([^\]]+?)\])?``(#{CC_ALL}+?)``/m], - - # `monospaced` - [:monospaced, :constrained, /(^|[^#{CC_WORD};:"'`}])(?:\[([^\]]+?)\])?`(\S|\S#{CC_ALL}*?\S)`(?![#{CC_WORD}"'`])/m], - - # __emphasis__ - [:emphasis, :unconstrained, /\\?(?:\[([^\]]+?)\])?__(#{CC_ALL}+?)__/m], - - # _emphasis_ - [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?_(\S|\S#{CC_ALL}*?\S)_(?!#{CG_WORD})/m], - - # ##mark## (referred to in AsciiDoc Python as unquoted) - [:mark, :unconstrained, /\\?(?:\[([^\]]+?)\])?##(#{CC_ALL}+?)##/m], - - # #mark# (referred to in AsciiDoc Python as unquoted) - [:mark, :constrained, /(^|[^#{CC_WORD}&;:}])(?:\[([^\]]+?)\])?#(\S|\S#{CC_ALL}*?\S)#(?!#{CG_WORD})/m], - - # ^superscript^ - [:superscript, :unconstrained, /\\?(?:\[([^\]]+?)\])?\^(\S+?)\^/], - - # ~subscript~ - [:subscript, :unconstrained, /\\?(?:\[([^\]]+?)\])?~(\S+?)~/] - ] + 'nbsp' => ' ', + 'deg' => '°', + 'zwsp' => '​', + 'quot' => '"', + 'apos' => ''', + 'lsquo' => '‘', + 'rsquo' => '’', + 'ldquo' => '“', + 'rdquo' => '”', + 'wj' => '⁠', + 'brvbar' => '¦', + 'pp' => '++', + 'cpp' => 'C++', + 'amp' => '&', + 'lt' => '<', + 'gt' => '>' + } + + # Regular expression character classes (to ensure regexp compatibility between Ruby and JavaScript) + # CC stands for "character class", CG stands for "character class group" + unless RUBY_ENGINE == 'opal' + # CC_ALL is any character, including newlines (must 
be accompanied by multiline regexp flag) + CC_ALL = '.' + # CC_ANY is any character except newlines + CC_ANY = '.' + CC_EOL = '$' + CC_ALPHA = CG_ALPHA = '\p{Alpha}' + CC_ALNUM = CG_ALNUM = '\p{Alnum}' + CG_BLANK = '\p{Blank}' + CC_WORD = CG_WORD = '\p{Word}' + end - compat_quote_subs = quote_subs.dup - # ``quoted'' - compat_quote_subs[2] = [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?``(\S|\S#{CC_ALL}*?\S)''(?!#{CG_WORD})/m] - # `quoted' - compat_quote_subs[3] = [:single, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?`(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] - # ++monospaced++ - compat_quote_subs[4] = [:monospaced, :unconstrained, /\\?(?:\[([^\]]+?)\])?\+\+(#{CC_ALL}+?)\+\+/m] - # +monospaced+ - compat_quote_subs[5] = [:monospaced, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?\+(\S|\S#{CC_ALL}*?\S)\+(?!#{CG_WORD})/m] - # #unquoted# - #compat_quote_subs[8] = [:unquoted, *compat_quote_subs[8][1..-1]] - # ##unquoted## - #compat_quote_subs[9] = [:unquoted, *compat_quote_subs[9][1..-1]] - # 'emphasis' - compat_quote_subs.insert 3, [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?'(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] - - QUOTE_SUBS = { - false => quote_subs, - true => compat_quote_subs - } - quote_subs = nil - compat_quote_subs = nil + QUOTE_SUBS = {}.tap do |accum| + # unconstrained quotes:: can appear anywhere + # constrained quotes:: must be bordered by non-word characters + # NOTE these substitutions are processed in the order they appear here and + # the order in which they are replaced is important + accum[false] = normal = [ + # **strong** + [:strong, :unconstrained, /\\?(?:\[([^\]]+)\])?\*\*(#{CC_ALL}+?)\*\*/m], + # *strong* + [:strong, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?\*(\S|\S#{CC_ALL}*?\S)\*(?!#{CG_WORD})/m], + # "`double-quoted`" + [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?"`(\S|\S#{CC_ALL}*?\S)`"(?!#{CG_WORD})/m], + # '`single-quoted`' + [:single, :constrained, /(^|[^#{CC_WORD};:`}])(?:\[([^\]]+)\])?'`(\S|\S#{CC_ALL}*?\S)`'(?!#{CG_WORD})/m], + # ``monospaced`` + [:monospaced, :unconstrained, /\\?(?:\[([^\]]+)\])?``(#{CC_ALL}+?)``/m], + # `monospaced` + [:monospaced, :constrained, /(^|[^#{CC_WORD};:"'`}])(?:\[([^\]]+)\])?`(\S|\S#{CC_ALL}*?\S)`(?![#{CC_WORD}"'`])/m], + # __emphasis__ + [:emphasis, :unconstrained, /\\?(?:\[([^\]]+)\])?__(#{CC_ALL}+?)__/m], + # _emphasis_ + [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?_(\S|\S#{CC_ALL}*?\S)_(?!#{CG_WORD})/m], + # ##mark## (referred to in AsciiDoc Python as unquoted) + [:mark, :unconstrained, /\\?(?:\[([^\]]+)\])?##(#{CC_ALL}+?)##/m], + # #mark# (referred to in AsciiDoc Python as unquoted) + [:mark, :constrained, /(^|[^#{CC_WORD}&;:}])(?:\[([^\]]+)\])?#(\S|\S#{CC_ALL}*?\S)#(?!#{CG_WORD})/m], + # ^superscript^ + [:superscript, :unconstrained, /\\?(?:\[([^\]]+)\])?\^(\S+?)\^/], + # ~subscript~ + [:subscript, :unconstrained, /\\?(?:\[([^\]]+)\])?~(\S+?)~/] + ] + + accum[true] = compat = normal.drop 0 + # ``quoted'' + compat[2] = [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?``(\S|\S#{CC_ALL}*?\S)''(?!#{CG_WORD})/m] + # `quoted' + compat[3] = [:single, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?`(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] + # ++monospaced++ + compat[4] = [:monospaced, :unconstrained, /\\?(?:\[([^\]]+)\])?\+\+(#{CC_ALL}+?)\+\+/m] + # +monospaced+ + compat[5] = [:monospaced, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?\+(\S|\S#{CC_ALL}*?\S)\+(?!#{CG_WORD})/m] + # #unquoted# + #compat[8] = 
[:unquoted, *compat[8][1..-1]] + # ##unquoted## + #compat[9] = [:unquoted, *compat[9][1..-1]] + # 'emphasis' + compat.insert 3, [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?'(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] + end - # NOTE in Ruby 1.8.7, [^\\] does not match start of line, - # so we need to match it explicitly - # order is significant + # NOTE order of replacements is significant REPLACEMENTS = [ # (C) [/\\?\(C\)/, '©', :none], @@ -1231,13 +468,13 @@ [/\\?\(R\)/, '®', :none], # (TM) [/\\?\(TM\)/, '™', :none], - # foo -- bar - # FIXME this drops the endline if it appears at end of line - [/(^|\n| |\\)--( |\n|$)/, ' — ', :none], + # foo -- bar (where either space character can be a newline) + # NOTE this necessarily drops the newline if replacement appears at end of line + [/(?: |\n|^|\\)--(?: |\n|$)/, ' — ', :none], # foo--bar [/(#{CG_WORD})\\?--(?=#{CG_WORD})/, '—​', :leading], # ellipsis - [/\\?\.\.\./, '…​', :leading], + [/\\?\.\.\./, '…​', :none], # right single quote [/\\?`'/, '’', :none], # apostrophe (inside a word) @@ -1251,371 +488,71 @@ # left double arrow <= [/\\?<=/, '⇐', :none], # restore entities - [/\\?(&)amp;((?:[a-zA-Z]{2,}|#\d{2,6}|#x[a-fA-F0-9]{2,5});)/, '', :bounding] + [/\\?(&)amp;((?:[a-zA-Z][a-zA-Z]+\d{0,2}|#\d\d\d{0,4}|#x[\da-fA-F][\da-fA-F][\da-fA-F]{0,3});)/, '', :bounding] ] - class << self - - # Public: Parse the AsciiDoc source input into a {Document} - # - # Accepts input as an IO (or StringIO), String or String Array object. If the - # input is a File, information about the file is stored in attributes on the - # Document object. - # - # input - the AsciiDoc source as a IO, String or Array. - # options - a String, Array or Hash of options to control processing (default: {}) - # String and Array values are converted into a Hash. - # See {Document#initialize} for details about these options. - # - # Returns the Document - def load input, options = {} - options = options.dup - if (timings = options[:timings]) - timings.start :read - end - - attributes = options[:attributes] = if !(attrs = options[:attributes]) - {} - elsif ::Hash === attrs || (::RUBY_ENGINE_JRUBY && ::Java::JavaUtil::Map === attrs) - attrs.dup - elsif ::Array === attrs - attrs.inject({}) do |accum, entry| - k, v = entry.split '=', 2 - accum[k] = v || '' - accum - end - elsif ::String === attrs - # convert non-escaped spaces into null character, so we split on the - # correct spaces chars, and restore escaped spaces - capture_1 = '\1' - attrs = attrs.gsub(SpaceDelimiterRx, %(#{capture_1}#{NULL})).gsub(EscapedSpaceRx, capture_1) - attrs.split(NULL).inject({}) do |accum, entry| - k, v = entry.split '=', 2 - accum[k] = v || '' - accum - end - elsif (attrs.respond_to? :keys) && (attrs.respond_to? :[]) - # convert it to a Hash as we know it - original_attrs = attrs - attrs = {} - original_attrs.keys.each do |key| - attrs[key] = original_attrs[key] - end - attrs - else - raise ::ArgumentError, %(illegal type for attributes option: #{attrs.class.ancestors}) - end - - lines = nil - if ::File === input - # TODO cli checks if input path can be read and is file, but might want to add check to API - input_path = ::File.expand_path input.path - # See https://reproducible-builds.org/specs/source-date-epoch/ - input_mtime = ::ENV['SOURCE_DATE_EPOCH'] ? 
(::Time.at ::ENV['SOURCE_DATE_EPOCH'].to_i).utc : input.mtime - lines = input.readlines - # hold off on setting infile and indir until we get a better sense of their purpose - attributes['docfile'] = input_path - attributes['docdir'] = ::File.dirname input_path - attributes['docname'] = Helpers.basename input_path, true - docdate = (attributes['docdate'] ||= input_mtime.strftime('%Y-%m-%d')) - doctime = (attributes['doctime'] ||= input_mtime.strftime('%H:%M:%S %Z')) - attributes['docdatetime'] = %(#{docdate} #{doctime}) - elsif input.respond_to? :readlines - # NOTE tty, pipes & sockets can't be rewound, but can't be sniffed easily either - # just fail the rewind operation silently to handle all cases - begin - input.rewind - rescue - end - lines = input.readlines - elsif ::String === input - lines = input.lines.entries - elsif ::Array === input - lines = input.dup - else - raise ::ArgumentError, %(unsupported input type: #{input.class}) - end - - if timings - timings.record :read - timings.start :parse - end - - if options[:parse] == false - doc = Document.new lines, options - else - doc = (Document.new lines, options).parse - end - - timings.record :parse if timings - doc - rescue => ex - begin - context = %(asciidoctor: FAILED: #{attributes['docfile'] || ''}: Failed to load AsciiDoc document) - if ex.respond_to? :exception - # The original message must be explicitely preserved when wrapping a Ruby exception - wrapped_ex = ex.exception %(#{context} - #{ex.message}) - # JRuby automatically sets backtrace, but not MRI - wrapped_ex.set_backtrace ex.backtrace - else - # Likely a Java exception class - wrapped_ex = ex.class.new context, ex - wrapped_ex.stack_trace = ex.stack_trace - end - rescue - wrapped_ex = ex - end - raise wrapped_ex - end - - # Public: Parse the contents of the AsciiDoc source file into an Asciidoctor::Document - # - # Accepts input as an IO, String or String Array object. If the - # input is a File, information about the file is stored in - # attributes on the Document. + # Internal: Automatically load the Asciidoctor::Extensions module. # - # input - the String AsciiDoc source filename - # options - a String, Array or Hash of options to control processing (default: {}) - # String and Array values are converted into a Hash. - # See Asciidoctor::Document#initialize for details about options. + # Requires the Asciidoctor::Extensions module if the name is :Extensions. + # Otherwise, delegates to the super method. # - # Returns the Asciidoctor::Document - def load_file filename, options = {} - self.load ::File.new(filename || ''), options - end - - # Public: Parse the AsciiDoc source input into an Asciidoctor::Document and - # convert it to the specified backend format. - # - # Accepts input as an IO, String or String Array object. If the - # input is a File, information about the file is stored in - # attributes on the Document. - # - # If the :in_place option is true, and the input is a File, the output is - # written to a file adjacent to the input file, having an extension that - # corresponds to the backend format. Otherwise, if the :to_file option is - # specified, the file is written to that file. If :to_file is not an absolute - # path, it is resolved relative to :to_dir, if given, otherwise the - # Document#base_dir. If the target directory does not exist, it will not be - # created unless the :mkdirs option is set to true. 
If the file cannot be - # written because the target directory does not exist, or because it falls - # outside of the Document#base_dir in safe mode, an IOError is raised. - # - # If the output is going to be written to a file, the header and footer are - # included unless specified otherwise (writing to a file implies creating a - # standalone document). Otherwise, the header and footer are not included by - # default and the converted result is returned. - # - # input - the String AsciiDoc source filename - # options - a String, Array or Hash of options to control processing (default: {}) - # String and Array values are converted into a Hash. - # See Asciidoctor::Document#initialize for details about options. - # - # Returns the Document object if the converted String is written to a - # file, otherwise the converted String - def convert input, options = {} - options = options.dup - options.delete(:parse) - to_file = options.delete(:to_file) - to_dir = options.delete(:to_dir) - mkdirs = options.delete(:mkdirs) || false - timings = options[:timings] - - case to_file - when true, nil - write_to_same_dir = !to_dir && ::File === input - stream_output = false - write_to_target = to_dir - to_file = nil - when false - write_to_same_dir = false - stream_output = false - write_to_target = false - to_file = nil - when '/dev/null' - return self.load input, options - else - write_to_same_dir = false - stream_output = to_file.respond_to? :write - write_to_target = stream_output ? false : to_file - end - - unless options.key? :header_footer - options[:header_footer] = true if write_to_same_dir || write_to_target - end - - # NOTE at least make intended target directory available, if there is one - if write_to_same_dir - input_path = ::File.expand_path input.path - options[:to_dir] = (outdir = ::File.dirname input_path) - elsif write_to_target - if to_dir - if to_file - options[:to_dir] = ::File.dirname ::File.expand_path(::File.join to_dir, to_file) - else - options[:to_dir] = ::File.expand_path to_dir - end - elsif to_file - options[:to_dir] = ::File.dirname ::File.expand_path to_file - end + # This method provides the same functionality as using autoload on + # Asciidoctor::Extensions, except that the constant isn't recognized as + # defined prior to it being loaded. + # + # Returns the resolved constant, if resolved, otherwise nothing. + def self.const_missing name + if name == :Extensions + require_relative 'asciidoctor/extensions' + Extensions else - options[:to_dir] = nil + super end + end unless RUBY_ENGINE == 'opal' - doc = self.load input, options - - if write_to_same_dir - outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) - if outfile == input_path - raise ::IOError, %(input file and output file cannot be the same: #{outfile}) - end - elsif write_to_target - working_dir = options.has_key?(:base_dir) ? ::File.expand_path(options[:base_dir]) : ::File.expand_path(::Dir.pwd) - # QUESTION should the jail be the working_dir or doc.base_dir??? - jail = doc.safe >= SafeMode::SAFE ? 
working_dir : nil - if to_dir - outdir = doc.normalize_system_path(to_dir, working_dir, jail, :target_name => 'to_dir', :recover => false) - if to_file - outfile = doc.normalize_system_path(to_file, outdir, nil, :target_name => 'to_dir', :recover => false) - # reestablish outdir as the final target directory (in the case to_file had directory segments) - outdir = ::File.dirname outfile - else - outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) - end - elsif to_file - outfile = doc.normalize_system_path(to_file, working_dir, jail, :target_name => 'to_dir', :recover => false) - # establish outdir as the final target directory (in the case to_file had directory segments) - outdir = ::File.dirname outfile - end - - unless ::File.directory? outdir - if mkdirs - Helpers.mkdir_p outdir - else - # NOTE we intentionally refer to the directory as it was passed to the API - raise ::IOError, %(target directory does not exist: #{to_dir}) - end - end - else - outfile = to_file - outdir = nil - end - - timings.start :convert if timings - opts = outfile && !stream_output ? { 'outfile' => outfile, 'outdir' => outdir } : {} - output = doc.convert opts - timings.record :convert if timings - - if outfile - timings.start :write if timings - doc.write output, outfile - timings.record :write if timings - - # NOTE document cannot control this behavior if safe >= SafeMode::SERVER - # NOTE skip if stylesdir is a URI - if !stream_output && doc.safe < SafeMode::SECURE && (doc.attr? 'linkcss') && - (doc.attr? 'copycss') && (doc.attr? 'basebackend-html') && - !((stylesdir = (doc.attr 'stylesdir')) && (Helpers.uriish? stylesdir)) - copy_asciidoctor_stylesheet = false - copy_user_stylesheet = false - if (stylesheet = (doc.attr 'stylesheet')) - if DEFAULT_STYLESHEET_KEYS.include? stylesheet - copy_asciidoctor_stylesheet = true - elsif !(Helpers.uriish? stylesheet) - copy_user_stylesheet = true - end - end - copy_coderay_stylesheet = (doc.attr? 'source-highlighter', 'coderay') && (doc.attr 'coderay-css', 'class') == 'class' - copy_pygments_stylesheet = (doc.attr? 'source-highlighter', 'pygments') && (doc.attr 'pygments-css', 'class') == 'class' - if copy_asciidoctor_stylesheet || copy_user_stylesheet || copy_coderay_stylesheet || copy_pygments_stylesheet - stylesoutdir = doc.normalize_system_path(stylesdir, outdir, doc.safe >= SafeMode::SAFE ? outdir : nil) - Helpers.mkdir_p stylesoutdir if mkdirs - - if copy_asciidoctor_stylesheet - Stylesheets.instance.write_primary_stylesheet stylesoutdir - # FIXME should Stylesheets also handle the user stylesheet? - elsif copy_user_stylesheet - if (stylesheet_src = (doc.attr 'copycss')).empty? - stylesheet_src = doc.normalize_system_path stylesheet - else - # NOTE in this case, copycss is a source location (but cannot be a URI) - stylesheet_src = doc.normalize_system_path stylesheet_src - end - stylesheet_dst = doc.normalize_system_path stylesheet, stylesoutdir, (doc.safe >= SafeMode::SAFE ? outdir : nil) - unless stylesheet_src == stylesheet_dst || (stylesheet_content = doc.read_asset stylesheet_src).nil? 
- ::File.open(stylesheet_dst, 'w') {|f| - f.write stylesheet_content - } - end - end - - if copy_coderay_stylesheet - Stylesheets.instance.write_coderay_stylesheet stylesoutdir - elsif copy_pygments_stylesheet - Stylesheets.instance.write_pygments_stylesheet stylesoutdir, (doc.attr 'pygments-style') - end - end - end - doc - else - output - end - end - - # Alias render to convert to maintain backwards compatibility - alias :render :convert - - # Public: Parse the contents of the AsciiDoc source file into an - # Asciidoctor::Document and convert it to the specified backend format. - # - # input - the String AsciiDoc source filename - # options - a String, Array or Hash of options to control processing (default: {}) - # String and Array values are converted into a Hash. - # See Asciidoctor::Document#initialize for details about options. - # - # Returns the Document object if the converted String is written to a - # file, otherwise the converted String - def convert_file filename, options = {} - self.convert ::File.new(filename || ''), options - end - - # Alias render_file to convert_file to maintain backwards compatibility - alias :render_file :convert_file - - end - - if RUBY_ENGINE == 'opal' - require 'asciidoctor/version' - require 'asciidoctor/timings' - else - autoload :VERSION, 'asciidoctor/version' - autoload :Timings, 'asciidoctor/timings' + unless RUBY_ENGINE == 'opal' + autoload :SyntaxHighlighter, %(#{LIB_DIR}/asciidoctor/syntax_highlighter) + autoload :Timings, %(#{LIB_DIR}/asciidoctor/timings) end end # core extensions -require 'asciidoctor/core_ext' +require_relative 'asciidoctor/core_ext' -# modules -require 'asciidoctor/helpers' -require 'asciidoctor/substitutors' +# modules and helpers +require_relative 'asciidoctor/helpers' +require_relative 'asciidoctor/logging' +require_relative 'asciidoctor/rx' +require_relative 'asciidoctor/substitutors' +require_relative 'asciidoctor/version' # abstract classes -require 'asciidoctor/abstract_node' -require 'asciidoctor/abstract_block' +require_relative 'asciidoctor/abstract_node' +require_relative 'asciidoctor/abstract_block' # concrete classes -require 'asciidoctor/attribute_list' -require 'asciidoctor/block' -require 'asciidoctor/callouts' -require 'asciidoctor/converter' -require 'asciidoctor/converter/html5' if RUBY_ENGINE_OPAL -require 'asciidoctor/document' -require 'asciidoctor/inline' -require 'asciidoctor/list' -require 'asciidoctor/parser' -require 'asciidoctor/path_resolver' -require 'asciidoctor/reader' -require 'asciidoctor/section' -require 'asciidoctor/stylesheets' -require 'asciidoctor/table' +require_relative 'asciidoctor/attribute_list' +require_relative 'asciidoctor/block' +require_relative 'asciidoctor/callouts' +require_relative 'asciidoctor/converter' +require_relative 'asciidoctor/document' +require_relative 'asciidoctor/inline' +require_relative 'asciidoctor/list' +require_relative 'asciidoctor/parser' +require_relative 'asciidoctor/path_resolver' +require_relative 'asciidoctor/reader' +require_relative 'asciidoctor/section' +require_relative 'asciidoctor/stylesheets' +require_relative 'asciidoctor/table' +require_relative 'asciidoctor/writer' + +# main API entry points +require_relative 'asciidoctor/load' +require_relative 'asciidoctor/convert' + +if RUBY_ENGINE == 'opal' + require_relative 'asciidoctor/syntax_highlighter' + require_relative 'asciidoctor/timings' + # this require is satisfied by the Asciidoctor.js build; it supplies compile and runtime overrides for Asciidoctor.js + require 
'asciidoctor/js/postscript' +end diff -Nru asciidoctor-1.5.5/LICENSE asciidoctor-2.0.10/LICENSE --- asciidoctor-1.5.5/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/LICENSE 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,22 @@ +MIT License + +Copyright (C) 2012-2019 Dan Allen, Sarah White, Ryan Waldron, and the +individual contributors to Asciidoctor. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff -Nru asciidoctor-1.5.5/LICENSE.adoc asciidoctor-2.0.10/LICENSE.adoc --- asciidoctor-1.5.5/LICENSE.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/LICENSE.adoc 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -.The MIT License -.... -Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -.... 
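The lib/asciidoctor.rb hunks above split the old `class << self` block into `asciidoctor/load` and `asciidoctor/convert`, but the public entry points stay the same: `load`/`load_file` parse AsciiDoc into a `Document`, while `convert`/`convert_file` additionally run a converter and optionally write the result. A minimal usage sketch, using only options documented in the removed comments (`:safe`, `:to_file`, `:mkdirs`); the file names are placeholders:

[source,ruby]
----
require 'asciidoctor'

# Parse only: returns an Asciidoctor::Document without converting it.
doc = Asciidoctor.load_file 'sample.adoc', safe: :safe
puts doc.doctitle

# Parse and convert in one step. Writing to a file implies a standalone
# document (header and footer included); the output directory is only
# created when :mkdirs is set, otherwise an IOError is raised.
Asciidoctor.convert_file 'sample.adoc', safe: :safe,
  to_file: 'out/sample.html', mkdirs: true
----

Passing `to_file: false` returns the converted String instead of writing a file, as the removed `convert` logic above shows.
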
diff -Nru asciidoctor-1.5.5/man/asciidoctor.1 asciidoctor-2.0.10/man/asciidoctor.1 --- asciidoctor-1.5.5/man/asciidoctor.1 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/man/asciidoctor.1 2019-08-18 16:11:54.000000000 +0000 @@ -1,31 +1,40 @@ '\" t .\" Title: asciidoctor .\" Author: Dan Allen, Sarah White, Ryan Waldron -.\" Generator: Asciidoctor 1.5.5 -.\" Date: 2016-10-05 +.\" Generator: Asciidoctor 2.0.10 +.\" Date: 2019-05-31 .\" Manual: Asciidoctor Manual -.\" Source: Asciidoctor 1.5.5 +.\" Source: Asciidoctor 2.0.10 .\" Language: English .\" -.TH "ASCIIDOCTOR" "1" "2016-10-05" "Asciidoctor 1.5.5" "Asciidoctor Manual" +.TH "ASCIIDOCTOR" "1" "2019-05-31" "Asciidoctor 2.0.10" "Asciidoctor Manual" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 .nh .ad l .de URL -\\$2 \(laURL: \\$1 \(ra\\$3 +\fI\\$2\fP <\\$1>\\$3 .. -.if \n[.g] .mso www.tmac -.LINKSTYLE blue R < > +.als MTO URL +.if \n[.g] \{\ +. mso www.tmac +. am URL +. ad l +. . +. am MTO +. ad l +. . +. LINKSTYLE blue R < > +.\} .SH "NAME" -asciidoctor \- converts AsciiDoc source files to HTML, DocBook and other formats +asciidoctor \- converts AsciiDoc source files to HTML, DocBook, and other formats .SH "SYNOPSIS" .sp \fBasciidoctor\fP [\fIOPTION\fP]... \fIFILE\fP... .SH "DESCRIPTION" .sp -The asciidoctor(1) command converts the AsciiDoc source file(s) \fIFILE\fP to HTML5, DocBook 5, DocBook 4.5, man(ual) page and other custom output formats. +The asciidoctor(1) command converts the AsciiDoc source file(s) \fIFILE\fP to HTML5, DocBook 5, man(ual) page, and other custom output formats. .sp If \fIFILE\fP is \fI\-\fP then the AsciiDoc source is read from standard input. .SH "OPTIONS" @@ -34,21 +43,21 @@ \fB\-B, \-\-base\-dir\fP=\fIDIR\fP .RS 4 Base directory containing the document and resources. -Defaults to the directory containing the source file, or the working directory if the source is read from a stream. -Can be used as a way to chroot the execution of the program. +Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. +When combined with the safe mode setting, can be used to chroot the execution of the program. .RE .sp \fB\-S, \-\-safe\-mode\fP=\fISAFE_MODE\fP .RS 4 -Set safe mode level: \fIunsafe\fP, \fIsafe\fP, \fIserver\fP or \fIsecure\fP. -Disables potentially dangerous macros in source files, such as \f[CR]include::[]\fP. +Set safe mode level: \fIunsafe\fP, \fIsafe\fP, \fIserver\fP, or \fIsecure\fP. +Disables potentially dangerous macros in source files, such as \f(CRinclude::[]\fP. If not set, the safe mode level defaults to \fIunsafe\fP when Asciidoctor is invoked using this script. .RE .sp \fB\-\-safe\fP .RS 4 Set safe mode level to \fIsafe\fP. -Enables include macros, but restricts access to ancestor paths of source file. +Enables include directives, but prevents access to ancestor paths of source file. Provided for compatibility with the asciidoc command. If not set, the safe mode level defaults to \fIunsafe\fP when Asciidoctor is invoked using this script. .RE @@ -56,11 +65,11 @@ .sp \fB\-a, \-\-attribute\fP=\fIATTRIBUTE\fP .RS 4 -Define, override or delete a document attribute. -Command\-line attributes take precedence over attributes defined in the source file. +Define, override, or unset a document attribute. +Command\-line attributes take precedence over attributes defined in the source file unless either the name or value ends in \fI@\fP. .sp \fIATTRIBUTE\fP is normally formatted as a key\-value pair, in the form \fINAME=VALUE\fP. 
-Alternate acceptable forms are \fINAME\fP (where the \fIVALUE\fP defaults to an empty string), \fINAME!\fP (unassigns the \fINAME\fP attribute) and \fINAME=VALUE@\fP (where \fIVALUE\fP does not override value of \fINAME\fP attribute if it\(cqs already defined in the source document). +Alternate forms are \fINAME\fP (where the \fIVALUE\fP defaults to an empty string), \fINAME!\fP (unsets the \fINAME\fP attribute), and \fINAME=VALUE@\fP (or \fINAME@=VALUE\fP) (where \fIVALUE\fP does not override the \fINAME\fP attribute if it\(cqs already defined in the source document). Values containing spaces should be enclosed in quotes. .sp This option may be specified more than once. @@ -68,33 +77,27 @@ .sp \fB\-b, \-\-backend\fP=\fIBACKEND\fP .RS 4 -Backend output file format: \fIhtml5\fP, \fIdocbook5\fP, \fIdocbook45\fP and \fImanpage\fP are supported out of the box. +Backend output file format: \fIhtml5\fP, \fIdocbook5\fP, \fIdocbook45\fP, and \fImanpage\fP are supported out of the box. You can also use the backend alias names \fIhtml\fP (aliased to \fIhtml5\fP) or \fIdocbook\fP (aliased to \fIdocbook5\fP). +Other values can be passed, but if Asciidoctor cannot resolve the backend to a converter, it will fail. Defaults to \fIhtml5\fP. -Other options can be passed, but if Asciidoctor cannot find the backend, it will fail during conversion. .RE .sp \fB\-d, \-\-doctype\fP=\fIDOCTYPE\fP .RS 4 -Document type: \fIarticle\fP, \fIbook\fP, \fImanpage\fP or \fIinline\fP. +Document type: \fIarticle\fP, \fIbook\fP, \fImanpage\fP, or \fIinline\fP. Sets the root element when using the \fIdocbook\fP backend and the style class on the HTML body element when using the \fIhtml\fP backend. The \fIbook\fP document type allows multiple level\-0 section titles in a single document. -The \fImanpage\fP document type enables parsing of metadata necessary to produce a manpage. +The \fImanpage\fP document type enables parsing of metadata necessary to produce a man page. The \fIinline\fP document type allows the content of a single paragraph to be formatted and returned without wrapping it in a containing element. Defaults to \fIarticle\fP. .RE -.SS "Rendering Control" -.sp -\fB\-C, \-\-compact\fP -.RS 4 -Compact the output by removing blank lines. -(No longer in use). -.RE +.SS "Document Conversion" .sp \fB\-D, \-\-destination\-dir\fP=\fIDIR\fP .RS 4 Destination output directory. -Defaults to the directory containing the source file, or the working directory if the source is read from a stream. +Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. If specified, the directory is resolved relative to the working directory. .RE .sp @@ -122,16 +125,24 @@ \fB\-n, \-\-section\-numbers\fP .RS 4 Auto\-number section titles. -Synonym for \fB\-\-attribute numbered\fP. +Synonym for \fB\-\-attribute sectnums\fP. .RE .sp \fB\-o, \-\-out\-file\fP=\fIOUT_FILE\fP .RS 4 Write output to file \fIOUT_FILE\fP. Defaults to the base name of the input file suffixed with \fIbackend\fP extension. -If the input is read from standard input, then the output file defaults to stdout. -If \fIOUT_FILE\fP is \fI\-\fP then the standard output is also used. -If specified, the file is resolved relative to the working directory. +The file is resolved relative to the working directory. +If the input is read from standard input or a named pipe (fifo), then the output file defaults to stdout. +If \fIOUT_FILE\fP is \fI\-\fP, then the output file is written to standard output. 
+.RE +.sp +\fB\-R, \-\-source\-dir\fP=\fIDIR\fP +.RS 4 +Source directory. +Currently only used if the destination directory is also specified. +Used to preserve the directory structure of files converted within this directory in the destination directory. +If specified, the directory is resolved relative to the working directory. .RE .sp \fB\-r, \-\-require\fP=\fILIBRARY\fP @@ -142,7 +153,8 @@ .sp \fB\-s, \-\-no\-header\-footer\fP .RS 4 -Suppress the document header and footer in the output. +Output an embeddable document, which excludes the header, the footer, and everything outside the body of the document. +This option is useful for producing documents that can be inserted into an external template. .RE .sp \fB\-T, \-\-template\-dir\fP=\fIDIR\fP @@ -158,42 +170,54 @@ .RE .SS "Processing Information" .sp +\fB\-\-failure\-level\fP=\fILEVEL\fP +.RS 4 +The minimum logging level that triggers a non\-zero exit code (failure). +If this option is not set (default: FATAL), the program exits with exit code zero even if warnings or errors have been logged. +.RE +.sp \fB\-q, \-\-quiet\fP .RS 4 -Silence warnings. +Silence application log messages and script warnings. .RE .sp \fB\-\-trace\fP .RS 4 -Include backtrace information on errors. -Not enabled by default. +Include backtrace information when reporting errors. .RE .sp \fB\-v, \-\-verbose\fP .RS 4 -Verbosely print processing information and configuration file checks to stderr. +Verbosely print processing information to stderr, including debug\-level log messages. +.RE +.sp +\fB\-w, \-\-warnings\fP +.RS 4 +Turn on script warnings (applies to executed code). .RE .sp \fB\-t, \-\-timings\fP .RS 4 -Display timings information (time to read, parse and convert). +Print timings report to stderr (time to read, parse, and convert). .RE .SS "Program Information" .sp -\fB\-h, \-\-help\fP +\fB\-h, \-\-help\fP [\fITOPIC\fP] .RS 4 -Show the help message. +Print a help message. +Show the command usage if \fITOPIC\fP is not specified or recognized. +Dump the Asciidoctor man page (in troff/groff format) if \fITOPIC\fP is \fImanpage\fP. .RE .sp \fB\-V, \-\-version\fP .RS 4 Print program version number. .sp -\f[CR]\-v\fP can also be used if no other flags or arguments are present. +\fB\-v\fP can also be used if no source files are specified. .RE .SH "ENVIRONMENT" .sp -\fBAsciidoctor\fP honors the SOURCE_DATE_EPOCH environment variable. +\fBAsciidoctor\fP honors the \fBSOURCE_DATE_EPOCH\fP environment variable. If this variable is assigned an integer value, that value is used as the epoch of all input documents and as the local date and time. See \c .URL "https://reproducible\-builds.org/specs/source\-date\-epoch/" "" " " @@ -215,13 +239,13 @@ .URL "https://github.com/asciidoctor/asciidoctor/issues?q=is%3Aopen" "" "." .SH "AUTHORS" .sp -\fBAsciidoctor\fP was written by Dan Allen, Ryan Waldron, Jason Porter, Nick Hengeveld and other contributors. +\fBAsciidoctor\fP was written by Dan Allen, Ryan Waldron, Jason Porter, Nick Hengeveld, and other contributors. .sp \fBAsciiDoc\fP was written by Stuart Rackham and has received contributions from many other individuals. .SH "RESOURCES" .sp \fBProject web site:\fP \c -.URL "http://asciidoctor.org" "" "" +.URL "https://asciidoctor.org" "" "" .sp \fBGit source repository on GitHub:\fP \c .URL "https://github.com/asciidoctor/asciidoctor" "" "" @@ -233,11 +257,12 @@ .URL "http://discuss.asciidoctor.org" "" "" .SH "COPYING" .sp -Copyright (C) 2012\-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. 
+Copyright (C) 2012\-2019 Dan Allen, Ryan Waldron, and the Asciidoctor Project. Free use of this software is granted under the terms of the MIT License. -.SH "AUTHOR(S)" +.SH "AUTHORS" .sp -\fBDan Allen, Sarah White, Ryan Waldron\fP -.RS 4 -Author(s). -.RE +Dan Allen +.sp +Sarah White +.sp +Ryan Waldron \ No newline at end of file diff -Nru asciidoctor-1.5.5/man/asciidoctor.adoc asciidoctor-2.0.10/man/asciidoctor.adoc --- asciidoctor-1.5.5/man/asciidoctor.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/man/asciidoctor.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,13 +1,14 @@ = asciidoctor(1) Dan Allen; Sarah White; Ryan Waldron :doctype: manpage +:release-version: 2.0.10 :man manual: Asciidoctor Manual -:man source: Asciidoctor 1.5.5 +:man source: Asciidoctor {release-version} :page-layout: base == NAME -asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats +asciidoctor - converts AsciiDoc source files to HTML, DocBook, and other formats == SYNOPSIS @@ -15,7 +16,7 @@ == DESCRIPTION -The asciidoctor(1) command converts the AsciiDoc source file(s) _FILE_ to HTML5, DocBook 5, DocBook 4.5, man(ual) page and other custom output formats. +The asciidoctor(1) command converts the AsciiDoc source file(s) _FILE_ to HTML5, DocBook 5, man(ual) page, and other custom output formats. If _FILE_ is _-_ then the AsciiDoc source is read from standard input. @@ -25,55 +26,51 @@ *-B, --base-dir*=_DIR_:: Base directory containing the document and resources. - Defaults to the directory containing the source file, or the working directory if the source is read from a stream. - Can be used as a way to chroot the execution of the program. + Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. + When combined with the safe mode setting, can be used to chroot the execution of the program. *-S, --safe-mode*=_SAFE_MODE_:: - Set safe mode level: _unsafe_, _safe_, _server_ or _secure_. + Set safe mode level: _unsafe_, _safe_, _server_, or _secure_. Disables potentially dangerous macros in source files, such as `include::[]`. If not set, the safe mode level defaults to _unsafe_ when Asciidoctor is invoked using this script. *--safe*:: Set safe mode level to _safe_. - Enables include macros, but restricts access to ancestor paths of source file. + Enables include directives, but prevents access to ancestor paths of source file. Provided for compatibility with the asciidoc command. If not set, the safe mode level defaults to _unsafe_ when Asciidoctor is invoked using this script. === Document Settings *-a, --attribute*=_ATTRIBUTE_:: - Define, override or delete a document attribute. - Command-line attributes take precedence over attributes defined in the source file. + Define, override, or unset a document attribute. + Command-line attributes take precedence over attributes defined in the source file unless either the name or value ends in _@_. + _ATTRIBUTE_ is normally formatted as a key-value pair, in the form _NAME=VALUE_. -Alternate acceptable forms are _NAME_ (where the _VALUE_ defaults to an empty string), _NAME!_ (unassigns the _NAME_ attribute) and _NAME=VALUE@_ (where _VALUE_ does not override value of _NAME_ attribute if it's already defined in the source document). 
+Alternate forms are _NAME_ (where the _VALUE_ defaults to an empty string), _NAME!_ (unsets the _NAME_ attribute), and _NAME=VALUE@_ (or _NAME@=VALUE_) (where _VALUE_ does not override the _NAME_ attribute if it's already defined in the source document). Values containing spaces should be enclosed in quotes. + This option may be specified more than once. *-b, --backend*=_BACKEND_:: - Backend output file format: _html5_, _docbook5_, _docbook45_ and _manpage_ are supported out of the box. + Backend output file format: _html5_, _docbook5_, _docbook45_, and _manpage_ are supported out of the box. You can also use the backend alias names _html_ (aliased to _html5_) or _docbook_ (aliased to _docbook5_). + Other values can be passed, but if Asciidoctor cannot resolve the backend to a converter, it will fail. Defaults to _html5_. - Other options can be passed, but if Asciidoctor cannot find the backend, it will fail during conversion. *-d, --doctype*=_DOCTYPE_:: - Document type: _article_, _book_, _manpage_ or _inline_. + Document type: _article_, _book_, _manpage_, or _inline_. Sets the root element when using the _docbook_ backend and the style class on the HTML body element when using the _html_ backend. The _book_ document type allows multiple level-0 section titles in a single document. - The _manpage_ document type enables parsing of metadata necessary to produce a manpage. + The _manpage_ document type enables parsing of metadata necessary to produce a man page. The _inline_ document type allows the content of a single paragraph to be formatted and returned without wrapping it in a containing element. Defaults to _article_. -=== Rendering Control - -*-C, --compact*:: - Compact the output by removing blank lines. - (No longer in use). +=== Document Conversion *-D, --destination-dir*=_DIR_:: Destination output directory. - Defaults to the directory containing the source file, or the working directory if the source is read from a stream. + Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. If specified, the directory is resolved relative to the working directory. *-E, --template-engine*=_NAME_:: @@ -93,21 +90,28 @@ *-n, --section-numbers*:: Auto-number section titles. - Synonym for *--attribute numbered*. + Synonym for *--attribute sectnums*. *-o, --out-file*=_OUT_FILE_:: Write output to file _OUT_FILE_. Defaults to the base name of the input file suffixed with _backend_ extension. - If the input is read from standard input, then the output file defaults to stdout. - If _OUT_FILE_ is _-_ then the standard output is also used. - If specified, the file is resolved relative to the working directory. + The file is resolved relative to the working directory. + If the input is read from standard input or a named pipe (fifo), then the output file defaults to stdout. + If _OUT_FILE_ is _-_, then the output file is written to standard output. + +*-R, --source-dir*=_DIR_:: + Source directory. + Currently only used if the destination directory is also specified. + Used to preserve the directory structure of files converted within this directory in the destination directory. + If specified, the directory is resolved relative to the working directory. *-r, --require*=_LIBRARY_:: Require the specified library before executing the processor, using the standard Ruby require. This option may be specified more than once. *-s, --no-header-footer*:: - Suppress the document header and footer in the output. 
+ Output an embeddable document, which excludes the header, the footer, and everything outside the body of the document. + This option is useful for producing documents that can be inserted into an external template. *-T, --template-dir*=_DIR_:: A directory containing custom converter templates that override one or more templates from the built-in set. @@ -121,32 +125,40 @@ === Processing Information +*--failure-level*=_LEVEL_:: + The minimum logging level that triggers a non-zero exit code (failure). + If this option is not set (default: FATAL), the program exits with exit code zero even if warnings or errors have been logged. + *-q, --quiet*:: - Silence warnings. + Silence application log messages and script warnings. *--trace*:: - Include backtrace information on errors. - Not enabled by default. + Include backtrace information when reporting errors. *-v, --verbose*:: - Verbosely print processing information and configuration file checks to stderr. + Verbosely print processing information to stderr, including debug-level log messages. + +*-w, --warnings*:: + Turn on script warnings (applies to executed code). *-t, --timings*:: - Display timings information (time to read, parse and convert). + Print timings report to stderr (time to read, parse, and convert). === Program Information -*-h, --help*:: - Show the help message. +*-h, --help* [_TOPIC_]:: + Print a help message. + Show the command usage if _TOPIC_ is not specified or recognized. + Dump the Asciidoctor man page (in troff/groff format) if _TOPIC_ is _manpage_. *-V, --version*:: Print program version number. + -`-v` can also be used if no other flags or arguments are present. +*-v* can also be used if no source files are specified. == ENVIRONMENT -*Asciidoctor* honors the SOURCE_DATE_EPOCH environment variable. +*Asciidoctor* honors the *SOURCE_DATE_EPOCH* environment variable. If this variable is assigned an integer value, that value is used as the epoch of all input documents and as the local date and time. See https://reproducible-builds.org/specs/source-date-epoch/ for more information about this environment variable. @@ -164,13 +176,13 @@ == AUTHORS -*Asciidoctor* was written by Dan Allen, Ryan Waldron, Jason Porter, Nick Hengeveld and other contributors. +*Asciidoctor* was written by Dan Allen, Ryan Waldron, Jason Porter, Nick Hengeveld, and other contributors. *AsciiDoc* was written by Stuart Rackham and has received contributions from many other individuals. == RESOURCES -*Project web site:* http://asciidoctor.org +*Project web site:* https://asciidoctor.org *Git source repository on GitHub:* https://github.com/asciidoctor/asciidoctor @@ -180,5 +192,5 @@ == COPYING -Copyright \(C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. +Copyright \(C) 2012-2019 Dan Allen, Ryan Waldron, and the Asciidoctor Project. Free use of this software is granted under the terms of the MIT License. 
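Both versions of the man page above describe the `-a, --attribute` forms `NAME`, `NAME!`, and `NAME=VALUE@`. The same forms pass through the API's `:attributes` option, which the `load` code shown earlier accepts as a Hash, an Array, or a space-delimited String. A hedged sketch of that equivalence; the input file name is a placeholder:

[source,ruby]
----
require 'asciidoctor'

# Roughly equivalent to:
#   asciidoctor -a sectnums -a source-highlighter=coderay -a linkcss! sample.adoc
# A String value is split on unescaped spaces (see the removed load code),
# a trailing '!' unsets the attribute, and a trailing '@' makes the
# assignment soft, so a value set in the document itself wins.
Asciidoctor.convert_file 'sample.adoc', safe: :safe,
  attributes: 'sectnums source-highlighter=coderay linkcss!'
----
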
diff -Nru asciidoctor-1.5.5/Rakefile asciidoctor-2.0.10/Rakefile --- asciidoctor-1.5.5/Rakefile 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/Rakefile 2019-08-18 16:11:54.000000000 +0000 @@ -1,144 +1,3 @@ -require File.expand_path '../lib/asciidoctor/version', __FILE__ - -def prepare_test_env - # rather than hardcoding gc settings in test task, - # could use https://gist.github.com/benders/788695 - ENV['RUBY_GC_MALLOC_LIMIT'] = 128_000_000.to_s - ENV['RUBY_GC_OLDMALLOC_LIMIT'] = 128_000_000.to_s - if RUBY_VERSION >= '2.1' - ENV['RUBY_GC_HEAP_INIT_SLOTS'] = 800_000.to_s - ENV['RUBY_GC_HEAP_FREE_SLOTS'] = 800_000.to_s - ENV['RUBY_GC_HEAP_GROWTH_MAX_SLOTS'] = 250_000.to_s - ENV['RUBY_GC_HEAP_GROWTH_FACTOR'] = 1.25.to_s - else - ENV['RUBY_FREE_MIN'] = 800_000.to_s - end -end - -begin - require 'rake/testtask' - Rake::TestTask.new(:test) do |test| - prepare_test_env - puts %(LANG: #{ENV['LANG']}) if ENV.key? 'TRAVIS_BUILD_ID' - test.libs << 'test' - test.pattern = 'test/**/*_test.rb' - test.verbose = true - test.warning = true - end - task :default => :test -rescue LoadError -end - -=begin -# Run tests with Encoding.default_external set to US-ASCII -begin - Rake::TestTask.new(:test_us_ascii) do |test| - prepare_test_env - puts "LANG: #{ENV['LANG']}" - test.libs << 'test' - test.pattern = 'test/**/*_test.rb' - test.ruby_opts << '-EUS-ASCII' if RUBY_VERSION >= '1.9' - test.verbose = true - test.warning = true - end -rescue LoadError -end -=end - -begin - require 'cucumber/rake/task' - Cucumber::Rake::Task.new(:features) do |t| - end -rescue LoadError -end - -def ci_setup_tasks - tasks = [] - begin - require 'ci/reporter/rake/minitest' - tasks << 'ci:setup:minitest' - # FIXME reporter for Cucumber tests not activating - #require 'ci/reporter/rake/cucumber' - #tasks << 'ci:setup:cucumber' - rescue LoadError - end if ENV['SHIPPABLE'] && RUBY_VERSION >= '1.9.3' - tasks -end - -desc 'Activates coverage and JUnit-style XML reports for tests' -task :coverage => ci_setup_tasks do - # exclude coverage run for Ruby 1.8.7 or (disabled) if running on Travis CI - ENV['COVERAGE'] = 'true' if RUBY_VERSION >= '1.9.3' # && (ENV['SHIPPABLE'] || !ENV['TRAVIS_BUILD_ID']) - ENV['CI_REPORTS'] = 'shippable/testresults' - ENV['COVERAGE_REPORTS'] = 'shippable/codecoverage' -end - -namespace :test do - desc 'Run unit and feature tests' - task :all => [:test,:features] -end - -=begin -begin - require 'rdoc/task' - RDoc::Task.new do |rdoc| - rdoc.rdoc_dir = 'rdoc' - rdoc.title = "Asciidoctor #{Asciidoctor::VERSION}" - rdoc.markup = 'tomdoc' if rdoc.respond_to?(:markup) - rdoc.rdoc_files.include('LICENSE.adoc', 'lib/**/*.rb') - end -rescue LoadError -end -=end - -begin - require 'yard' - require 'yard-tomdoc' - require './lib/asciidoctor' - require './lib/asciidoctor/extensions' - - # Prevent YARD from breaking command statements in literal paragraphs - class CommandBlockPostprocessor < Asciidoctor::Extensions::Postprocessor - def process document, output - output.gsub(/
    \$ (.+?)<\/pre>/m, '
    $ \1
    ') - end - end - Asciidoctor::Extensions.register do - postprocessor CommandBlockPostprocessor - end - - # register .adoc extension for AsciiDoc markup helper - YARD::Templates::Helpers::MarkupHelper::MARKUP_EXTENSIONS[:asciidoc] = %w(adoc) - YARD::Rake::YardocTask.new do |yard| - yard.files = %w( - lib/**/*.rb - - - CHANGELOG.adoc - LICENSE.adoc - ) - # --no-highlight enabled to prevent verbatim blocks in AsciiDoc that begin with $ from being dropped - # need to patch htmlify method to not attempt to syntax highlight blocks (or fix what's wrong) - yard.options = (IO.readlines '.yardopts').map {|l| l.chomp.delete('"').split ' ', 2 }.flatten - end -rescue LoadError -end - -begin - require 'bundler/gem_tasks' - - # Enhance the release task to create an explicit commit for the release - #Rake::Task[:release].enhance [:commit_release] - - # NOTE you don't need to push after updating version and committing locally - # WARNING no longer works; it's now necessary to get master in a state ready for tagging - task :commit_release do - Bundler::GemHelper.new.send(:guard_clean) - sh "git commit --allow-empty -a -m 'Release #{Asciidoctor::VERSION}'" - end -rescue LoadError -end - -desc 'Open an irb session preloaded with this library' -task :console do - sh 'bundle console', :verbose => false -end +# frozen_string_literal: true +Dir.glob('tasks/*.rake').each {|file| load file } +task default: %w(test:all) diff -Nru asciidoctor-1.5.5/README.adoc asciidoctor-2.0.10/README.adoc --- asciidoctor-1.5.5/README.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/README.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,19 +1,33 @@ = Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron -v1.5.5, 2016-10-05 +v2.0.10, 2019-05-31 // settings: -:page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} -ifdef::env-github[:status:] +ifndef::env-github[:icons: font] +ifdef::env-github[] +:status: +:outfilesuffix: .adoc +:caution-caption: :fire: +:important-caption: :exclamation: +:note-caption: :paperclip: +:tip-caption: :bulb: +:warning-caption: :warning: +endif::[] +// Variables: +:release-version: 2.0.10 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js -:uri-project: http://asciidoctor.org +:uri-gradle-plugin: {uri-org}/asciidoctor-gradle-plugin +:uri-maven-plugin: {uri-org}/asciidoctor-maven-plugin +:uri-asciidoclet: {uri-org}/asciidoclet +:uri-project: https://asciidoctor.org +:uri-gem: https://rubygems.org/gems/asciidoctor ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news @@ -22,13 +36,13 @@ :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: -ifdef::env-site[] +ifdef::env-site,env-yard[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc -:uri-license: {uri-rel-file-base}LICENSE.adoc +:uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor @@ -37,57 +51,69 @@ :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain -:uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx 
-:uri-render-doc: {uri-docs}/render-documents +:uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos +:uri-convert-doc: {uri-docs}/convert-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html -:uri-foundation: http://foundation.zurb.com +:uri-foundation: https://foundation.zurb.com +:uri-opal: https://opalrb.com :uri-tilt: https://github.com/rtomayko/tilt -:uri-ruby: https://ruby-lang.org +:uri-ruby: https://www.ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png -{uri-project}/[Asciidoctor] is a _fast_ text processor and publishing toolchain for converting {uri-what-is-asciidoc}[AsciiDoc] content to HTML5, DocBook 5 (or 4.5) and other formats. -Asciidoctor is written in Ruby, packaged as a RubyGem and published to {uri-rubygem}[RubyGems.org]. -The gem is also included in several Linux distributions, including Fedora, Debian and Ubuntu. -Asciidoctor is open source, {uri-repo}[hosted on GitHub] and released under {uri-license}[the MIT license]. - -ifndef::env-site[] -.Translations of this document are available in the following languages: -* {uri-rel-file-base}README-zh_CN.adoc[汉语] -* {uri-rel-file-base}README-fr.adoc[Français] -* {uri-rel-file-base}README-jp.adoc[日本語] +{uri-project}[Asciidoctor] is a _fast_, {uri-license}[open source] text processor and publishing toolchain for converting {uri-what-is-asciidoc}[AsciiDoc] content to HTML5, DocBook, PDF, and other formats. +Asciidoctor is written in Ruby and runs on all major operating systems. +The Asciidoctor project is {uri-repo}[hosted on GitHub]. + +To simplify installation, Asciidoctor is packaged and distributed as a RubyGem (aka gem) to {uri-rubygem}[RubyGems.org]. +It's also distributed as a package for popular Linux distributions and macOS. +In addition to running on Ruby, Asciidoctor can be executed on a JVM using {uri-asciidoctorj}[AsciidoctorJ] or in any JavaScript environment (including the browser) using {uri-asciidoctorjs}[Asciidoctor.js]. + +ifndef::env-site,env-yard[] +This document is also available in the following languages: + +{uri-rel-file-base}README-zh_CN.adoc[汉语] +| +{uri-rel-file-base}README-de.adoc[Deutsch] +| +{uri-rel-file-base}README-fr.adoc[Français] +| +{uri-rel-file-base}README-jp.adoc[日本語] endif::[] .Key documentation [.compact] -* {uri-docs}/what-is-asciidoc[What is Asciidoc?] +* {uri-docs}/what-is-asciidoc[What is AsciiDoc?] * {uri-docs}/asciidoc-writers-guide[AsciiDoc Writer's Guide] -* {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] * {uri-docs}/user-manual[Asciidoctor User Manual] +* {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] -.Where Ruby goes, Asciidoctor follows -**** -You can run Asciidoctor on the JVM using JRuby. -To invoke the Asciidoctor API directly from Java and other JVM languages, use {uri-asciidoctorj}[AsciidoctorJ]. -There are plugins available, based on AsciidoctorJ, that integrate the Asciidoctor processor into Apache Maven, Gradle or Javadoc builds. 
+ifdef::status[] +//.*Project health* +image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={uri-gem}] +image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] +image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI),link=https://travis-ci.org/asciidoctor/asciidoctor] +image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor),link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] +//image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status,link=https://coveralls.io/r/asciidoctor/asciidoctor] +//image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate,link=https://codeclimate.com/github/asciidoctor/asciidoctor] +image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs,link=https://inch-ci.org/github/asciidoctor/asciidoctor] +endif::[] -Asciidoctor also runs in JavaScript. -We use http://opalrb.org[Opal] to transcompile the Ruby source to JavaScript to produce {uri-asciidoctorjs}[Asciidoctor.js], a fully-functional version of Asciidoctor that works in any JavaScript environment, such as a web browser or Node.js. -Asciidoctor.js is used to power the AsciiDoc preview extensions for Chrome, Atom, Brackets and other web-based tooling. -**** +== Sponsors -ifdef::status[] -.*Project health* -image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] -image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] -//image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] -image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] -image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] +We want to recognize our {uri-project}/supporters[sponsors] for their commitment to improving the state of technical documentation by supporting this project. +Thank you sponsors! +Without your generous support, Asciidoctor would not be possible. + +ifndef::env-site,env-yard[] +Major funding for Asciidoctor is provided by our *Change Maker*, https://opendevise.com[OpenDevise], our *Strategy Sponsors*, https://www.khronos.org/[Khronos Group] and Linda Roberts, and our *Pull Request Backers*, Brian Dominick, Guillaume Grossetie, and Abel Salgado Romero. +Additional funding is provided by the supporters listed on our {uri-project}/supporters[Community Backers] page. endif::[] +You can support this project by becoming a sponsor through https://opencollective.com/asciidoctor[OpenCollective]. + == The Big Picture Asciidoctor reads content written in plain text, as shown in the panel on the left in the image below, and converts it to HTML5, as shown rendered in the right panel. 
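The Big Picture paragraph above sums up the flow: plain-text AsciiDoc in, rendered HTML5 out. A minimal sketch of that round trip through the Ruby API; because no output file is given, the result comes back as an embeddable HTML fragment rather than a standalone page:

[source,ruby]
----
require 'asciidoctor'

# Convert an AsciiDoc string to HTML5. With no :to_file, the converted
# String is returned and the header/footer are omitted by default.
html = Asciidoctor.convert 'Asciidoctor reads *plain text* and converts it to HTML5.'
puts html
----
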
@@ -97,27 +123,35 @@ == AsciiDoc Processing -Asciidoctor reads and parses text written in the AsciiDoc syntax, then feeds the parse tree to a set of built-in converters to produce HTML5, DocBook 5 (or 4.5) or man(ual) page output. +Asciidoctor reads and parses text written in the AsciiDoc syntax, then feeds the parse tree to a set of built-in converters to produce HTML5, DocBook 5, and man(ual) page output. You have the option of using your own converter or loading {uri-tilt}[Tilt]-supported templates to customize the generated output or produce additional formats. -NOTE: Asciidoctor is a drop-in replacement for the original AsciiDoc Python processor (`asciidoc.py`). -The Asciidoctor test suite has {uri-tests}[> 1,600 tests] to ensure compatibility with the AsciiDoc syntax. +Asciidoctor is a drop-in replacement for its predecessor, AsciiDoc Python (`asciidoc.py`). +The Asciidoctor test suite has {uri-tests}[> 2,350 tests] to ensure compatibility with the AsciiDoc syntax. In addition to the classic AsciiDoc syntax, Asciidoctor recognizes additional markup and formatting options, such as font-based icons (e.g., `+icon:fire[]+`) and UI elements (e.g., `+button:[Save]+`). Asciidoctor also offers a modern, responsive theme based on {uri-foundation}[Foundation] to style the HTML5 output. +== Where Ruby goes, Asciidoctor follows + +You can run Asciidoctor on the JVM using JRuby. +To invoke the Asciidoctor API directly from Java and other JVM languages, use {uri-asciidoctorj}[AsciidoctorJ]. +There are plugins available for {uri-maven-plugin}[Apache Maven], {uri-gradle-plugin}[Gradle], and {uri-asciidoclet}[Javadoc], which allow you to integrate AsciiDoc processing directly into your build using AsciidoctorJ. + +Asciidoctor also runs in JavaScript. +{uri-opal}[Opal] is used to transcompile the Ruby source to JavaScript to produce {uri-asciidoctorjs}[Asciidoctor.js]. +Asciidoctor.js is a fully-functional version of Asciidoctor that works in any JavaScript environment, such as a web browser or Node.js. +It's used to power the AsciiDoc preview extensions for Chrome, Atom, Brackets and other web-based tooling. + == Requirements -Asciidoctor works on Linux, OS X (Mac) and Windows and requires one of the following implementations of {uri-ruby}[Ruby]: +Asciidoctor works on Linux, macOS and Windows and requires one of the following implementations of {uri-ruby}[Ruby]: -* MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) -* JRuby (1.7 in Ruby 1.8 and 1.9 modes, 9000) -* Rubinius 2.2.x +* CRuby (aka MRI) 2.3 - 2.6 +* JRuby 9.1 - 9.2 +* TruffleRuby (GraalVM) * Opal (JavaScript) -We welcome your help testing Asciidoctor on these and other platforms. -Refer to the <> section to learn how to get involved. - [CAUTION] ==== If you're using a non-English Windows environment, you may bump into an `Encoding::UndefinedConversionError` when invoking Asciidoctor. @@ -132,15 +166,88 @@ == Installation -Asciidoctor can be installed using (a) the `gem install` command, (b) Bundler or (c) package managers for popular Linux distributions. +Asciidoctor can be installed using (a) package managers for popular Linux distributions, (b) Homebrew for macOS, (c) the `gem install` command (recommended for Windows users), (d) the Asciidoctor Docker image, or (e) Bundler. + +The benefit of using your operating system's package manager to install the gem is that it handles installing Ruby and the RubyGems library if those packages are not already installed on your machine. 
+
+=== (a) Linux package managers
+
+The version of Asciidoctor installed by the package manager may not match the latest release of Asciidoctor.
+Consult the package repository for your distribution to find out which version is packaged per distribution release.
+
+* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)]
+* https://www.archlinux.org/packages/?name=asciidoctor[Arch Linux (asciidoctor)]
+* https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)]
+* https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)]
+* https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)]
+* https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)]
+
+If you want to use a version of Asciidoctor that is newer than what is installed by the package manager, see the <<gem-install>> section.
+
+==== apk (Alpine Linux)
+
+To install the gem on Alpine Linux, open a terminal and type:
+
+ $ sudo apk add asciidoctor
+
+==== pacman (Arch Linux)
+
+To install the gem on Arch-based distributions, open a terminal and type:
+
+ $ sudo pacman -S asciidoctor
+
+==== APT
+
+On Debian and Debian-based distributions such as Ubuntu, use APT to install Asciidoctor.
+To install the package, open a terminal and type:
+
+ $ sudo apt-get install -y asciidoctor
+
+==== DNF
+
+On RPM-based Linux distributions, such as Fedora, CentOS, and RHEL, use the DNF package manager to install Asciidoctor.
+To install the package, open a terminal and type:
+
+ $ sudo dnf install -y asciidoctor
+
+=== (b) Homebrew (macOS)
+
+You can use Homebrew, the macOS package manager, to install Asciidoctor.
+If you don’t have Homebrew on your computer, complete the installation instructions at https://brew.sh/[brew.sh] first.
+Once Homebrew is installed, you’re ready to install the `asciidoctor` gem.
+Open a terminal and type:
+
+ $ brew install asciidoctor
+
+Homebrew installs the `asciidoctor` gem into an exclusive prefix that's independent of system gems.
+
+=== (c) Windows
+
+To use Asciidoctor on Windows, you have two easy options.
+
+==== Chocolatey
+
-TIP: The benefit of using a Linux package manager to install the gem is that it handles installing Ruby and the RubyGems library if those packages are not already installed on your machine.
-The drawback is that the package may not be available immediately after the release of the gem.
-If you need the latest version, you can always fallback to using the `gem` command.
+If you already use https://chocolatey.org[Chocolatey] on your machine, you can install Ruby with:

-=== (a) gem install
+[source]
+----
+choco install ruby
+----
+
+Then follow the <<gem-install>> instructions.

-Open a terminal and type (excluding the leading `$`):
+==== RubyInstaller
+
+Alternatively, use https://rubyinstaller.org/downloads/[RubyInstaller]: download the installer for your Windows version and, once Ruby is installed, follow the <<gem-install>> instructions.
+
+[#gem-install]
+=== (d) gem install
+
+Before installing Asciidoctor using `gem install`, you should use https://rvm.io[RVM] to install Ruby in your home directory (i.e., user space).
+Then, you can safely use the `gem` command to install or update the Asciidoctor gem.
+When using RVM, gems are installed in a location isolated from the system.
+
+Open a terminal and type:

 $ gem install asciidoctor

@@ -148,30 +255,21 @@

 $ gem install asciidoctor --pre

-.Upgrading your installation
-[TIP]
-====
-If you have an earlier version of Asciidoctor installed, you can update it using:
+=== (e) Docker

- $ gem update asciidoctor
+See {uri-install-docker}[Installing Asciidoctor using Docker].

-If you install a new version of the gem using `gem install` instead of gem update, you'll have multiple versions installed.
-If that's the case, use the following gem command to remove the old versions:
-
- $ gem cleanup asciidoctor
-====
-
-=== (b) Bundler
+=== (f) Bundler

. Create a Gemfile in the root folder of your project (or the current directory)
. Add the `asciidoctor` gem to your Gemfile as follows:
+
-[source]
+[source,subs=attributes+]
----
source 'https://rubygems.org'
gem 'asciidoctor'
# or specify the version explicitly
-# gem 'asciidoctor', '1.5.5'
+# gem 'asciidoctor', '{release-version}'
----

. Save the Gemfile
@@ -180,70 +278,48 @@

 $ bundle

To upgrade the gem, specify the new version in the Gemfile and run `bundle` again.
-Using `bundle update` is *not* recommended as it will also update other gems, which may not be the desired result.
+Using `bundle update` (without specifying a gem) is *not* recommended as it will also update other gems, which may not be the desired result.

-=== (c) Linux package managers
+== Upgrade

-==== DNF (Fedora 21 or greater)
+If you installed Asciidoctor using a package manager, your operating system is probably configured to automatically update packages, in which case you don't need to update the gem manually.

-To install the gem on Fedora 21 or greater using dnf, open a terminal and type:
-
- $ sudo dnf install -y asciidoctor
+=== apk (Alpine Linux)

To upgrade the gem, use:

- $ sudo dnf update -y asciidoctor
-
-TIP: Your system may be configured to automatically update rpm packages, in which case no action is required by you to update the gem.
-
-==== apt-get (Debian, Ubuntu, Mint)
-
-To install the gem on Debian, Ubuntu or Mint, open a terminal and type:
+ $ sudo apk add -u asciidoctor

- $ sudo apt-get install -y asciidoctor
+=== APT

To upgrade the gem, use:

 $ sudo apt-get upgrade -y asciidoctor

-TIP: Your system may be configured to automatically update deb packages, in which case no action is required by you to update the gem.
-
-The version of Asciidoctor installed by the package manager (apt-get) may not match the latest release of Asciidoctor.
-Consult the package repository for your distribution to find out which version is packaged per distribution release.
+=== DNF

-* https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all&section=all[asciidoctor package by Debian release]
-* http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all&section=all[asciidoctor package by Ubuntu release]
-* https://community.linuxmint.com/software/view/asciidoctor[asciidoctor package by Mint release]
+To upgrade the gem, use:

-[CAUTION]
-====
-You're advised against using the `gem update` command to update a gem managed by the package manager.
-Doing so puts the system into an inconsistent state as the package manager can no longer track the files (which get installed under /usr/local).
-Simply put, system gems should only be managed by the package manager.
+ $ sudo dnf update -y asciidoctor -If you want to use a version of Asciidoctor that is newer than what is installed by the package manager, you should use http://rvm.io[RVM] to install Ruby in your home directory (i.e., user space). -Then, you can safely use the `gem` command to install or update the Asciidoctor gem. -When using RVM, gems are installed in a location isolated from the system. -==== +=== Homebrew (macOS) -==== apk (Alpine Linux) +To upgrade the gem, use: -To install the gem on Alpine Linux, open a terminal and type: + $ brew update + $ brew upgrade asciidoctor - $ sudo apk add asciidoctor +=== gem install -To upgrade the gem, use: +If you previously installed Asciidoctor using the `gem` command, you'll need to manually upgrade Asciidoctor when a new version is released. +You can upgrade the gem by typing: - $ sudo apk add -u asciidoctor - -TIP: Your system may be configured to automatically update apk packages, in which case no action is required by you to update the gem. + $ gem install asciidoctor -=== Other installation options +When you install a new version of the gem using `gem install`, you end up with multiple versions installed. +Use the following command to remove the old versions: -* {uri-install-docker}[Installing Asciidoctor using Docker] -* {uri-install-osx-doc}[Installing Asciidoctor on Mac OS X] -// at the moment, the following entry is just a reiteration of the information in this README -//* {uri-install-doc}[Installing the Asciidoctor toolchain] + $ gem cleanup asciidoctor == Usage @@ -254,10 +330,10 @@ You should see information about the Asciidoctor version and your Ruby environment printed in the terminal. -[.output] +[.output,subs=attributes+] .... -Asciidoctor 1.5.5 [http://asciidoctor.org] -Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) +Asciidoctor {release-version} [https://asciidoctor.org] +Runtime Environment (ruby 2.6.0p0 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor also provides an API. @@ -284,7 +360,7 @@ Refer to the following resources to learn more about how to use the `asciidoctor` command. -* {uri-render-doc}[How do I convert a document?] +* {uri-convert-doc}[How do I convert a document?] * {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] === Ruby API @@ -307,7 +383,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- @@ -315,7 +391,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- @@ -323,7 +399,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert @@ -340,9 +416,8 @@ == Contributing -In the spirit of {uri-freesoftware}[free software], _everyone_ is encouraged to help improve this project. -If you discover errors or omissions in the source code, documentation, or website content, please don't hesitate to submit an issue or open a pull request with a fix. New contributors are always welcome! 
+If you discover errors or omissions in the source code, documentation, or website content, please don't hesitate to submit an issue or open a pull request with a fix. Here are some ways *you* can contribute: @@ -350,8 +425,7 @@ * by reporting bugs * by suggesting new features * by writing or editing documentation -* by writing specifications -* by writing code -- _No patch is too small._ +* by writing code with tests -- _No patch is too small._ ** fix typos ** add comments ** clean up inconsistent whitespace @@ -364,21 +438,18 @@ == Getting Help -The Asciidoctor project is developed to help you easily write and publish your content. +Asciidoctor is developed to help you easily write and publish your content. But we can't do it without your feedback! We encourage you to ask questions and discuss any aspects of the project on the discussion list, on Twitter or in the chat room. -Discussion list (Nabble):: {uri-discuss} -Twitter:: #asciidoctor hashtag or @asciidoctor mention Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] -//// -Chat (IRC):: {uri-irc}[#asciidoctor] on FreeNode IRC -//// +Discussion list (Nabble):: {uri-discuss} +Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] hashtag or https://twitter.com/asciidoctor[@asciidoctor] mention ifdef::env-github[] Further information and documentation about Asciidoctor can be found on the project's website. -{uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] +{uri-project}[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] The Asciidoctor organization on GitHub hosts the project's source code, issue tracker, and sub-projects. @@ -387,218 +458,26 @@ Issue tracker:: {uri-issues} Asciidoctor organization on GitHub:: {uri-org} -== Copyright and Licensing +== License -Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. -Free use of this software is granted under the terms of the MIT License. +Copyright (C) 2012-2019 Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. +Use of this software is granted under the terms of the MIT License. -See the {uri-license}[LICENSE] file for details. +See the {uri-license}[LICENSE] for the full license text. == Authors -*Asciidoctor* is led by https://github.com/mojavelinux[Dan Allen] and https://github.com/graphitefriction[Sarah White] and has received contributions from {uri-contributors}[many other individuals] in Asciidoctor's awesome community. +*Asciidoctor* is led by https://github.com/mojavelinux[Dan Allen] and https://github.com/graphitefriction[Sarah White] and has received contributions from {uri-contributors}[many individuals] in Asciidoctor's awesome community. The project was initiated in 2012 by https://github.com/erebor[Ryan Waldron] and based on {uri-prototype}[a prototype] written by https://github.com/nickh[Nick Hengeveld]. -*AsciiDoc* was started by Stuart Rackham and has received contributions from many other individuals in the AsciiDoc community. +*AsciiDoc* was started by Stuart Rackham and has received contributions from many individuals in the AsciiDoc community. 
+ifndef::env-site[] == Changelog -== 1.5.5 (2016-10-05) - @mojavelinux - -Enhancements:: - * Add preference to limit the maximum size of an attribute value (#1861) - * Honor SOURCE_DATE_EPOCH environment variable to accomodate reproducible builds (@JojoBoulix) (#1721) - * Add reversed attribute to ordered list if reversed option is enabled (#1830) - * Add support for additional docinfo locations (e.g., :header) - * Configure default stylesheet to break monospace word if exceeds length of line; add roles to prevent breaks (#1814) - * Introduce translation file for built-in labels (@ciampix) - * Provide translations for built-in labels (@JmyL - kr, @ciampix - it, @ivannov - bg, @maxandersen - da, @radcortez - pt, @eddumelendez - es, @leathersole - jp, @aslakknutsen - no, @shahryareiv - fa, @AlexanderZobkov - ru, @dongwq - zh, @rmpestano - pt_BR, @ncomet - fr, @lgvz - fi, @patoi - hu, @BojanStipic - sr, @fwilhe - de, @rahmanusta - tr, @abelsromero - ca, @aboullaite - ar, @roelvs - nl) - * Translate README to Chinese (@diguage) - * Translate README to Japanese (@Mizuho32) - -Improvements:: - * Style nested emphasized phrases properly when using default stylesheet (#1691) - * Honor explicit table width even when autowidth option is set (#1843) - * Only explicit noheader option on table should disable implicit table header (#1849) - * Support docbook orient="land" attribute on tables (#1815) - * Add alias named list to retrieve parent List of ListItem - * Update push_include method to support chaining (#1836) - * Enable font smoothing on Firefox on OSX (#1837) - * Support combined use of sectanchors and sectlinks in HTML5 output (#1806) - * fix API docs for find_by - * Upgrade to Font Awesome 4.6.3 (@allenan, @mogztter) (#1723) - * README: add install instructions for Alpine Linux - * README: Switch yum commands to dnf in README - * README: Mention Mint as a Debian distro that packages Asciidoctor - * README: Add caution advising against using gem update to update a system-managed gem (@oddhack) - * README: sync French version with English version (@flashcode) - * Add missing endline after title element when converting open block to HTML - * Move list_marker_keyword method from AbstractNode to AbstractBlock - * Rename definition list to description list internally - -Compliance:: - * Support 6-digit decimal char refs, 5-digit hexidecimal char refs (#1824) - * Compatibility fixes for Opal - * Check for number using Integer instead of Fixnum class for compatibility with Ruby 2.4 - -Bug fixes:: - * Use method_defined? instead of respond_to? 
to check if method is already defined when patching (#1838) - * Fix invalid conditional in HTML5 converter when handling of SVG - * Processor#parse_content helper no longer shares attribute list between blocks (#1651) - * Fix infinite loop if unordered list marker is immediately followed by a dot (#1679) - * Don't break SVG source when cleaning if svg start tag name is immediately followed by endline (#1676) - * Prevent template converter from crashing if .rb file found in template directory (#1827) - * Fix crash when generating section ID when both idprefix & idseparator are blank (#1821) - * Use stronger CSS rule for general text color in Pygments stylesheet (#1802) - * Don't duplicate forward slash for path relative to root (#1822) - -Infrastructure:: - * Build gem properly in the absense of a git workspace, make compatible with JRuby (#1779) - * Run tests in CI using latest versions of Ruby, including Ruby 2.3 (@ferdinandrosario) - -== 1.5.4 (2016-01-03) - @mojavelinux - -Enhancements:: - * translate README into French (@anthonny, @mogztter, @gscheibel, @mgreau) (#1630) - * allow linkstyle in manpage output to be configured (#1610) - -Improvements:: - * upgrade to MathJax 2.6.0 and disable loading messages - * upgrade to Font Awesome 4.5.0 - * disable toc if document has no sections (#1633) - * convert inline asciimath to MathML (using asciimath gem) in DocBook converter (#1622) - * add attribute to control build reproducibility (@bk2204) (#1453) - * recognize \file:/// as a file root in Opal browser env (#1561) - * honor icon attribute on admonition block when font-based icons are enabled (@robertpanzer) (#1593) - * resolve custom icon relative to iconsdir; add file extension if absent (#1634) - * allow asciidoctor cli to resolve library path when invoked without leading ./ - -Compliance:: - * allow special section to be nested at any depth (#1591) - * ensure colpcwidth values add up to 100%; increase precision of values to 4 decimal places (#1647) - * ignore blank cols attribute on table (#1647) - * support shorthand syntax for block attributes on document title (#1650) - -Bug fixes:: - * don't include default toc in AsciiDoc table cell; don't pass toc location attributes to nested document (#1582) - * guard against nil dlist list item in find_by (#1618) - * don't swallow trailing line when include file is not readable (#1602) - * change xlink namespace to xl in DocBook 5 output to prevent parse error (#1597) - * make callouts globally unique within document, including AsciiDoc table cells (#1626) - * initialize Slim-related attributes regardless of when Slim was loaded (@terceiro) (#1576) - * differentiate literal backslash from escape sequence in manpage output (@ds26gte) (#1604) - * don't mistake line beginning with \. 
for troff macro in manpage output (@ds26gte) (#1589) - * escape leading dots so user content doesn't trigger troff macros in manpage output (@ds26gte) (#1631) - * use \c after .URL macro to remove extraneous space in manpage output (@ds26gte) (#1590) - * fix missing endline after .URL macro in manpage output (#1613) - * properly handle spacing around .URL/.MTO macro in manpage output (@ds26gte) (#1641) - * don't swallow doctitle attribute followed by block title (#1587) - * change strategy for splitting names of author; fixes bug in Opal/Asciidoctor.js - * don't fail if library is loaded more than once - -Infrastructure:: - * remove trailing endlines in project source code - * update contributing guidelines - * explicitly test ifeval scenario raised in issue #1585 - * remove backreference substitution hack for Opal/Asciidoctor.js - * fix assignment of default Hash value for Opal/Asciidoctor.js - * add JRuby 9.0.4.0 and Ruby 2.3.0 to the Travis CI build matrix - -== 1.5.3 (2015-10-31) - @mojavelinux - -Enhancements:: - * add support for interactive & inline SVGs (#1301, #1224) - * add built-in manpage backend (@davidgamba) (#651) - * create Mallard backend; asciidoctor/asciidoctor-mallard (@bk2204) (#425) - * add AsciiMath to MathML converter to support AsciiMath in DocBook converter (@pepijnve) (#954) - * allow text of selected lines to be highlighted in source block by Pygments or CodeRay (#1429) - * use value of `docinfo` attribute to control docinfo behavior (#1510) - * add `docinfosubs` attribute to control which substitutions are performed on docinfo files (@mogztter) (#405) - * drop ability to specify multiple attributes with a single `-a` flag when using the CLI (@mogztter) (#405) - * make subtitle separator chars for document title configurable (@rmannibucau) (#1350) - * make XrefInlineRx regexp more permissive (Mathieu Boespflug) (#844) - -Improvements:: - * load JavaScript and CSS at bottom of HTML document (@mogztter) (#1238) - * list available backends in help text (@plaindocs) (#1271) - * properly expand tabs in literal text (#1170, #841) - * add `source-indent` as document attribute (@mogztter) (#1169) - * upgrade MathJax to 2.5.3 (#1329) - * upgrade Font Awesome to 4.4.0 (@mogztter) (#1465) - * upgrade highlight.js to 8.6 (now 8.9.1) (#1390) - * don't abort if syntax highlighter isn't available (#1253) - * insert docinfo footer below footer div (#1503) - * insert toc at default location in embeddable HTML (#1443) - * replace _ and - in generated alt text for inline images - * restore attributes to header attributes after parse (#1255) - * allow docdate and doctime to be overridden (#1495) - * add CSS class `.center` for center block alignment (#1456) - * recognize U+2022 (bullet) as alternative marker for unordered lists (@mogztter) (#1177) - * allow videos to work for local files by prepending asset-uri-scheme (Chris) (#1320) - * always assign playlist param when loop option is enabled for YouTube video - * parse isolated version in revision line (@bk2204) (#790) - * autoload Tilt when template converter is instantiated (#1313) - * don't overwrite existing id entry in references table (#1256) - * use outfilesuffix attribute defined in header when resolving outfile (#1412) - * make AsciiDoc safe mode option on Slim engine match document (#1347) - * honor htmlsyntax attribute when backend is html/html5 (#1530) - * tighten spacing of wrapped lines in TOC (#1542) - * tune padding around table cells in horizontal dlist (#1418) - * load Droid Sans Mono 700 in default stylesheet - 
* set line height of table cells used for syntax highlighting - * set font-family of kbd; refine styling (#1423) - * extract condition into `quote_lines?` method (@mogztter) - * extract inline code into `read_paragraph` method (@mogztter) - * parent of block in ListItem should be ListItem (#1359) - * add helper methods to List and ListItem (#1551) - * add method `AbstractNode#add_role` and `AbstractNode#remove_role` (@robertpanzer) (#1366) - * introduce helper methods for sniffing URIs (#1422) - * add helper to calculate basename without file extension - * document `-I` and `-r` options in the manual page (@bk2204) - * fix `+--help+` output text for `-I` (@bk2204) - * don't require open-uri-cached if already loaded - * do not attempt to scan pattern of non-existent directory in template converter - * prevent CodeRay from bolding every 10th line number - -Compliance:: - * use `` for footnote reference in text instead of `` (#1523) - * fix alignment of wrapped text in footnote (#1524) - * include full stop after footnote number in embeddable HTML - * show manpage title & name section in embeddable HTML (#1179) - * resolve missing attribute in ifeval to empty string (#1387) - * support unbreakable & breakable options on table (rockyallen) (#1140) - -Bug fixes:: - * don't truncate exception stack in `Asciidoctor.load` (#1248) - * don't fail to save cause of Java exception (@robertpanzer) (#1458) - * fix precision error in timings report (#1342) - * resolve regexp for inline macro lazily (#1336) - * block argument to `find_by` should filter results (#1393) - * strip comment lines in indented text of dlist item (#1537) - * preserve escaped delimiter at end of line in a table (#1306) - * correctly calculate colnames for implicit columns (#1556) - * don't crash if colspan exceeds colspec (#1460) - * account for empty records in colspec (#1375) - * ignore empty cols attribute on table - * use `.inspect` to print MathJax delimiters (again) (#1198) - * use while loop instead of begin/while loop to address bug in Asciidoctor.js (#1408) - * force encoding of attribute values passed from cli (#1191) - * don't copy css if stylesheet or stylesdir is a URI (#1400) - * fix invalid color value in default CodeRay theme - * built-in writer no longer fails if output is nil (#1544) - * custom template engine options should take precedence - * fallback to require with a non-relative path to support Debian package (@mogztter) - * pass opts to recursive invocations of `PathResolver#system_path` - * fix and test external links in docbook backend - * use format symbol `:html` instead of `:html5` for Slim to fix warnings - * fix documentation for inline_macro and block_macro (Andrea Bedini) - * fix grammar in warning messages regarding thread_safe gem - -Infrastructure:: - * migrate opal_ext from core to Asciidoctor.js (#1517) - * add Ruby 2.2 to CI build; only specify minor Ruby versions - * enable containerized builds on Travis CI - * add config to run CI build on AppVeyor - * exclude benchmark folder from gem (#1522) +ifeval::[{safe-mode-level} < 20] +include::CHANGELOG.adoc[tag=compact,leveloffset=+1] +endif::[] Refer to the {uri-changelog}[CHANGELOG] for a complete list of changes in older releases. 
+endif::[] diff -Nru asciidoctor-1.5.5/README-de.adoc asciidoctor-2.0.10/README-de.adoc --- asciidoctor-1.5.5/README-de.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/README-de.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,476 @@ += Asciidoctor +Dan Allen ; Sarah White ; Ryan Waldron +v2.0.10, 2019-05-31 +// settings: +:idprefix: +:idseparator: - +:source-language: ruby +:language: {source-language} +ifndef::env-github[:icons: font] +ifdef::env-github[] +:status: +:outfilesuffix: .adoc +:caution-caption: :fire: +:important-caption: :exclamation: +:note-caption: :paperclip: +:tip-caption: :bulb: +:warning-caption: :warning: +endif::[] +// Variables: +:release-version: 2.0.10 +// URIs: +:uri-org: https://github.com/asciidoctor +:uri-repo: {uri-org}/asciidoctor +:uri-asciidoctorj: {uri-org}/asciidoctorj +:uri-asciidoctorjs: {uri-org}/asciidoctor.js +:uri-project: https://asciidoctor.org +ifdef::env-site[:uri-project: link:] +:uri-docs: {uri-project}/docs +:uri-news: {uri-project}/news +:uri-manpage: {uri-project}/man/asciidoctor +:uri-issues: {uri-repo}/issues +:uri-contributors: {uri-repo}/graphs/contributors +:uri-rel-file-base: link: +:uri-rel-tree-base: link: +ifdef::env-site[] +:uri-rel-file-base: {uri-repo}/blob/master/ +:uri-rel-tree-base: {uri-repo}/tree/master/ +endif::[] +:uri-changelog: {uri-rel-file-base}CHANGELOG.adoc +:uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc +:uri-license: {uri-rel-file-base}LICENSE +:uri-tests: {uri-rel-tree-base}test +:uri-discuss: http://discuss.asciidoctor.org +:uri-irc: irc://irc.freenode.org/#asciidoctor +:uri-rubygem: https://rubygems.org/gems/asciidoctor +:uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc +:uri-user-manual: {uri-docs}/user-manual +:uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor +//:uri-install-doc: {uri-docs}/install-toolchain +:uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos +:uri-render-doc: {uri-docs}/render-documents +:uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory +:uri-gitscm-repo: https://github.com/git/git-scm.com +:uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb +:uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html +:uri-foundation: https://foundation.zurb.com +:uri-opal: https://opalrb.com +:uri-tilt: https://github.com/rtomayko/tilt +:uri-ruby: https://ruby-lang.org +// images: +:image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png + +{uri-project}[Asciidoctor] ist ein _schneller_, {uri-license}[Open Source] Textverarbeitungs- und Publishing-Toolchain für die Konvertierung von {uri-what-is-asciidoc}[AsciiDoc]-Inhalten in HTML 5, DocBook 5, PDF und andere Formate. +Asciidoctor ist in Ruby geschrieben und läuft auf allen gängigen Betriebsystemen. +Um die Installation zu vereinfachen wird Asciidoctor als Gem auf {uri-rubygem}[RubyGems.org] verpackt und ist als Paket für gängige Linux-Distributionen und MacOS erhältlich. +Asciidoctor kann auch in einer JVM mit {uri-asciidoctorj}[AsciidoctorJ] oder einer beliebigen Javascript-Umgebung mit {uri-asciidoctorjs}[Asciidoctor.js] ausgeführt werden. +Das Asciidoctor-Projekt wird {uri-repo}[auf GitHub] gehostet. 
+ +ifndef::env-site[] +Dieses Dokument ist auch in folgenden Sprachen erhältlich: + +{uri-rel-file-base}README-zh_CN.adoc[汉语] +| +{uri-rel-file-base}README.adoc[English] +| +{uri-rel-file-base}README-fr.adoc[Français] +| +{uri-rel-file-base}README-jp.adoc[日本語] +endif::[] + +.Wichtige Dokumentation +[.compact] +* {uri-docs}/what-is-asciidoc[Was ist AsciiDoc?] +* {uri-docs}/asciidoc-writers-guide[Asciidoctor Benutzerhandbuch] +* {uri-docs}/user-manual[Asciidoctor Gebrauchshandbuch] +* {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax-Referenz] + +ifdef::status[] +.*Projekt Status* +image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] +image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] +//image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] +//image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] +image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] +endif::[] + +== Sponsoren + +Wir möchten unseren großzügigen Sponsoren danken, ohne deren Unterstützung Asciidoctor nicht möglich wäre. +Vielen Dank an die Sponsoren für ihr Engagement zur Verbesserung der technischen Dokumentation! +Zusätzliche Mittel werden von unseren https://asciidoctor.org/supporters[Community Backers] zur Verfügung gestellt. + +Sie können dieses Projekt unterstützen, indem Sie Sponsor bei https://opencollective.com/asciidoctor[OpenCollective] werden. + +== Das große Ganze + +Asciidoctor liest Inhalte, die im Klartext geschrieben wurden, wie im Feld links im Bild unten gezeigt, und wandelt Sie in HTML 5 um, wie im rechten Feld dargestellt. +Asciidoctor wendet ein Standard-Stylesheet auf das HTML 5-Dokument an, um ein angenehmes Out-of-the-Box-Erlebnis zu bieten. + +image::{image-uri-screenshot}[Preview of AsciiDoc source and corresponding rendered HTML] + +== AsciiDoc Verarbeitung + +Asciidoctor liest und analysiert Text, der in der AsciiDoc-Syntax geschrieben wurde, und leitet dann den Parse-Tree durch eine Reihe von eingebauten Konvertern, um HTML 5, DocBook 5 und man-pages zu erzeugen. +Sie haben die Möglichkeit, eigene Konverter zu verwenden oder {uri-tilt}[Tilt]-gestützte Vorlagen zu laden, um die generierte Ausgabe anzupassen oder zusätzliche Formate zu erzeugen. + +Asciidoctor ist ein Ersatz für den Original AsciiDoc Python Prozessor (`asciidoc.py`). +Die Asciidoctor-Testsuite verfügt über {uri-tests}[mehr als 2,000 Tests], um die Kompatibilität mit der AsciiDoc-Syntax sicherzustellen. + +Neben der klassischen AsciiDoc-Syntax erkennt Asciidoctor zusätzliche Markup- und Formatierungsoptionen, wie z.B. fontbasierte Icons (z.B. `+icon:fire[]+`) und UI-Elemente (z.B. `+button:[Save]+`). +Asciidoctor bietet auch ein modernes, __responsive Theme__, das auf {uri-foundation}[Foundation] basiert, um die HTML 5-Ausgabe zu gestalten. + +== Wo Ruby hingeht, folgt Asciidoctor + +Sie können Asciidoctor in einer JVM mit JRuby ausführen. 
+Um die Asciidoctor API direkt aus Java und anderen JVM-Sprachen aufzurufen, verwenden Sie {uri-asciidoctorj}[AsciidoctorJ]. +Es stehen Ihnen auf {uri-asciidoctorj}[AsciidoctorJ] basierende Plugins zur Verfügung, die den Asciidoctor Prozessor in Apache Maven, Gradle oder Javadoc Builds integrieren. + +Asciidoctor läuft auch in JavaScript. +{uri-opal}[Opal] wird verwendet, um den Ruby-Source in JavaScript umzukompilieren, um {uri-asciidoctorjs}[Asciidoctor.js] zu erzeugen. +Asciidoctor.js ist eine voll funktionsfähige Version von Asciidoctor, die in jeder JavaScript-Umgebung wie z.B. einem Webbrowser oder Node.js funktioniert. +Es wird für die AsciiDoc Vorschau-Erweiterungen für Chrome, Atom, Brackets und andere webbasierte Werkzeuge verwendet. + +== Anforderungen + +Asciidoctor arbeitet unter Linux, MacOS und Windows und benötigt eine der folgenden Implementierungen von {uri-ruby}[Ruby]: + +* CRuby (aka MRI) 2.3 - 2.6 +* JRuby 9.1 - 9.2 +* TruffleRuby (GraalVM) +* Opal (JavaScript) + +[CAUTION] +==== +Wenn Sie eine nicht-englische Windows-Umgebung verwenden, können Sie auf einen `Encoding::UndefinedConversionError` stoßen, wenn Sie Asciidoctor aufrufen. +Um dieses Problem zu beheben, empfehlen wir, die aktive Codepage in Ihrer Konsole auf UTF-8 umzustellen: + + chcp 65001 + +Sobald Sie diese Änderung vorgenommen haben, haben Sie alle Ihre Unicode-Kopfschmerzen hinter sich. +Wenn Sie eine IDE wie Eclipse verwenden, stellen Sie sicher, dass Sie dort auch die Kodierung auf UTF-8 setzen. +Asciidoctor funktioniert am besten, wenn Sie UTF-8 überall verwenden. +==== + +== Installation + +Asciidoctor kann mit (a) Paketmanagern für gängige Linux-Distributionen, (b) Homebrew für MacOS, (c) dem Befehl `gem install` (empfohlen für Windows-Benutzer), (d) dem Asciidoctor Docker-Image oder (e) Bundler installiert werden. + +Der Vorteil der Verwendung des Paketmanagers Ihres Betriebssystems zur Installation des Gem ist, dass er die Installation von Ruby und der RubyGems-Bibliothek übernimmt, wenn diese Pakete nicht bereits auf Ihrem Rechner installiert sind. + +=== (a) Linux Paketmanager + +Die vom Paketmanager installierte Version von Asciidoctor entspricht möglicherweise nicht der neuesten Version von Asciidoctor. +Konsultieren Sie das Paket-Repository für Ihre Distribution, um herauszufinden, welche Version in der Distribution gepackt ist. + +* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] +* https://www.archlinux.org/packages/?name=asciidoctor[Arch Linux (asciidoctor)] +* https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] +* https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] +* https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] +* https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + +Wenn Sie eine Version von Asciidoctor verwenden möchten, die neuer ist als die, die vom Paketmanager installiert wurde, lesen Sie bitte die <>. + +==== apk (Alpine Linux) + +Um ein Gem auf Alpine Linux zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: + + $ sudo apk add asciidoctor + +==== pacman (Arch Linux) + +Um ein Gem auf Arch-basierten Distributionen zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: + + $ sudo pacman -S asciidoctor + +==== APT + +Auf Debian und Debian-basierten Distributionen wie Ubuntu nutzen Sie APT um Asciidoctor zu installieren. 
+Um das Paket zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: + + $ sudo apt-get install -y asciidoctor + +==== DNF + +Auf RPM-basierten Linux-Distributionen, wie Fedora, CentOS und RHEL, nutzen Sie den DNF Paketmanager um Asciidoctor zu installieren. +Um das Paket zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: + + $ sudo dnf install -y asciidoctor + +=== (b) Homebrew (macOS) + +Sie können Homebrew, den macOS-Paketmanager, verwenden, um Asciidoctor zu installieren. +Wenn Sie Homebrew nicht auf Ihrem Computer haben, führen Sie zuerst die Installationsanweisungen unter https://brew.sh/[brew.sh] aus. +Sobald Homebrew installiert ist, können Sie das Asciidoctor gem installieren. +Öffnen Sie ein Terminal und geben Sie folgendes ein: + + $ brew install asciidoctor + +Homebrew installiert das `asciidoctor` Gem in ein exklusives Präfix, das unabhängig von den System-Gems ist. + +=== (c) Windows + +Um Asciidoctor unter Windows zu installieren, gibt es zwei einfache Möglichkeiten. + +==== Chocolatey + +Wenn Sie bereits https://chocolatey.org[chocolatey] verwenden, können Sie folgenden Befehl verwenden: + +[source] +---- +choco install ruby +---- + +Danach folgen Sie der <>. + +==== Rubyinstaller + +Oder Sie benutzen den https://rubyinstaller.org/downloads/[Rubyinstaller], laden Sie das für Ihre Windows Version passende Paket herunter und nach der Installation folgen Sie ebenfalls der <>. + +[#gem-install] +=== (d) gem install + +Bevor Sie Asciidoctor mit `gem install` installieren, sollten Sie https://rvm.io[RVM] verwenden, um Ruby in Ihrem Home-Verzeichnis zu installieren (z.B. Userspace). +Dann können Sie den Befehl `gem` sicher verwenden, um den Asciidoctor Gem zu installieren oder zu aktualisieren. +Bei der Verwendung von RVM werden Gems an einem vom System isolierten Ort installiert. + +Öffnen Sie ein Terminal und geben Sie folgendes ein: + + $ gem install asciidoctor + +Wenn Sie eine Vorabversion (z.B. einen Release-Kandidaten) installieren möchten, verwenden Sie: + + $ gem install asciidoctor --pre + +=== (e) Docker + +Siehe {uri-install-docker}[Installing Asciidoctor using Docker]. + +=== (f) Bundler + +. Erstellen Sie ein Gemfile im Stammordner Ihres Projekts (oder im aktuellen Verzeichnis). +. Fügen Sie den `asciidoctor` Gem wie folgt zu Ihrem Gemfile hinzu: + ++ +[source,subs=attributes+] +---- +source 'https://rubygems.org' +gem 'asciidoctor' +# oder spezifizieren Sie die Version explizit +# gem 'asciidoctor', '{release-version}' +---- + +. Speichern Sie das Gemfile +. Öffnen Sie ein Terminal und installieren Sie das Gem mit: + + $ bundle + +Um das Gem zu aktualisieren, geben Sie die neue Version im Gemfile an und führen Sie `bundle` erneut aus. +Die Verwendung von `bundle update` (ohne Angabe eines Gem) wird *nicht* empfohlen, da es auch andere Gems aktualisiert, was möglicherweise nicht das gewünschte Ergebnis ist. + +== Upgrade + +Wenn Sie Asciidoctor mit einem Paketmanager installiert haben, ist ihr Betriebssystem wahrscheinlich so konfiguriert, dass es Pakete automatisch aktualisiert. +In diesem Fall müssen Sie das Gem nicht manuell aktualisieren. 
+ +=== apk (Alpine Linux) + +Um das Gem zu aktualisieren, nutzen Sie: + + $ sudo apk add -u asciidoctor + +=== APT + +Um das Gem zu aktualisieren, nutzen Sie: + + $ sudo apt-get upgrade -y asciidoctor + +=== DNF + +Um das Gem zu aktualisieren, nutzen Sie: + + $ sudo dnf update -y asciidoctor + +=== Homebrew (macOS) + +Um das Gem zu aktualisieren, nutzen Sie: + + $ brew update + $ brew upgrade asciidoctor + +=== gem install + +Wenn Sie Asciidoctor zuvor mit dem Befehl `gem` installiert haben, müssen Sie Asciidoctor manuell aktualisieren, wenn eine neue Version veröffentlicht wird. +Sie können mit folgendem Befehl aktualisieren: + + $ gem install asciidoctor + +Wenn Sie eine neue Version des Edelsteins mit `gem install` installieren, werden mehrere Versionen installiert. +Verwenden Sie den folgenden Befehl, um die alten Versionen zu entfernen: + + $ gem cleanup asciidoctor + +== Verwendung + +Wenn der Asciidoctor Gem erfolgreich installiert wurde, ist das `asciidoctor` Kommandozeilen-Interface (CLI) in Ihrem PATH verfügbar. +Um die Verfügbarkeit zu überprüfen, führen Sie den folgenden Befehl in Ihrem Terminal aus: + + $ asciidoctor --version + +Sie sollten Informationen über die Asciidoctor-Version und Ihre Ruby-Umgebung im Terminal sehen. + +[.output,subs=attributes+] +.... +Asciidoctor 1.5.7 [https://asciidoctor.org] +Laufzeitumgebung (ruby 2.6.0p0 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) +.... + +Asciidoctor bietet auch eine API. +Die API ist für die Integration mit anderer Ruby-Software wie Rails, Sinatra und GitHub und anderen Sprachen wie Java (über {uri-asciidoctorj}[AsciidoctorJ]) und JavaScript (über {uri-asciidoctorjs}[Asciidoctor.js]) vorgesehen. + +=== Kommandozeile + +Mit dem Befehl `asciidoctor` können Sie Asciidoctor von der Kommandozeile (z.B. einem Terminal) aus aufrufen. + +Der folgende Befehl konvertiert die Datei README.adoc nach HTML und speichert das Ergebnis in der Datei README.html im gleichen Verzeichnis. +Der Name der erzeugten HTML-Datei wird aus der Quelldatei abgeleitet, indem die Dateierweiterung auf `.html` geändert wird. + + $ asciidoctor README.adoc + +Sie können den Asciidoctor-Prozessor steuern, indem Sie verschiedene Flags und Schalter hinzufügen, über die Sie sich mittels folgendem Befehl informieren können: + + $ asciidoctor --help + +Zum Beispiel, um die Datei in ein anderes Verzeichnis zu schreiben, verwenden Sie: + + $ asciidoctor -D output README.adoc + +Die `asciidoctor` {uri-manpage}[man page] bietet eine vollständige Referenz der Kommandozeile. + +Lesen Sie die folgenden Ressourcen, um mehr über die Verwendung des `asciidoctor`-Befehls zu erfahren. + +* {uri-render-doc}[How do I convert a document?] +* {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] + +=== Ruby API + +Um Asciidoctor in Ihrer Anwendung verwenden zu können, benötigen Sie zunächst das Gem: + +[source] +---- +require 'asciidoctor' +---- + +Sie können dann eine AsciiDoc-Quelldatei in eine HTML-Datei konvertieren: + +[source] +---- +Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe +---- + +WARNING: Bei Verwendung von Asciidoctor über die API ist der Standard-Sicherheitsmodus `:secure`. +Im sicheren Modus sind mehrere Kernfunktionen deaktiviert, darunter die `include`-Direktive. +Wenn Sie diese Funktionen aktivieren möchten, müssen Sie den Sicherheitsmodus explizit auf `:server` (empfohlen) oder `:safe` setzen. 
+ +Sie können einen AsciiDoc-String auch in ein integrierbares HTML (zum Einfügen in eine HTML-Seite) konvertieren, mit: + +[source] +---- +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' +Asciidoctor.convert content, safe: :safe +---- + +Wenn Sie das komplette HTML-Dokument wünschen, aktivieren Sie die Option `head_footer` wie folgt: + +[source] +---- +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' +html = Asciidoctor.convert content, header_footer: true, safe: :safe +---- + +Wenn Sie Zugriff auf das analysierte Dokument benötigen, können Sie die Konvertierung in einzelne Schritte aufteilen: + +[source] +---- +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' +document = Asciidoctor.load content, header_footer: true, safe: :safe +puts document.doctitle +html = document.convert +---- + +Denken Sie daran, dass __Sie die Ausgabe__ von Asciidoctor __ändern können__, wenn Sie sie nicht mögen! +Asciidoctor unterstützt benutzerdefinierte Konverter, die die Konvertierung vom geparsten Dokument in die generierte Ausgabe übernehmen können. + +Eine einfache Möglichkeit, die Ausgabe stückweise anzupassen, ist die Verwendung des Template-Konverters. +Der Template-Konverter ermöglicht es Ihnen, eine von {uri-tilt}[Tilt]-gestützte Template-Datei zur Verfügung zu stellen, um die Konvertierung eines beliebigen Knotens im Dokument zu handhaben. + +Wie auch immer Sie vorgehen, Sie können die Ausgabe zu 100% kontrollieren. +Weitere Informationen zur Verwendung der API oder zur Anpassung der Ausgabe finden Sie im {uri-user-manual}[Benutzerhandbuch]. + +== Mitwirken + +Neue Mitwirkende sind immer willkommen! +Wenn Sie Fehler oder Auslassungen im Quellcode, in der Dokumentation oder im Inhalt der Website entdecken, zögern Sie bitte nicht, ein Problem zu melden oder eine Pull Request mit einem Fix zu öffnen. + +Hier sind einige Möglichkeiten, wie *Sie* dazu beitragen können: + +* durch Verwendung von Vorabversionen (Alpha-, Beta- oder Preview-Versionen) +* durch das Melden von Fehlern +* durch Vorschläge für neue Funktionen +* durch das Verfassen oder Bearbeiten von Dokumentationen +* durch Schreiben von Code mit Tests -- _Kein Patch ist zu klein._ +** Tippfehler beheben +** Kommentare hinzufügen +** inkonsistente Leerzeichen bereinigen +** Tests schreiben! +* Refactoring von Code +* durch die Behebung von {uri-issues}[Problemen] +* durch Überprüfung von Patches + +Der {uri-contribute}[Contributing Guide] bietet Informationen darüber, wie man Probleme, Feature Requests, Code und Dokumentation für das Asciidoctor Projekt erstellt, gestaltet und einreicht. + +== Hilfe finden + +Asciidoctor wurde entwickelt, um Ihnen das Schreiben und Veröffentlichen Ihrer Inhalte zu erleichtern. +Aber wir können es nicht ohne ihr Feedback machen! +Wir ermutigen Sie, Fragen zu stellen und alle Aspekte des Projekts auf der Diskussionsliste, auf Twitter oder im Chatroom zu diskutieren. + +Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] +Discussionsliste (Nabble):: {uri-discuss} +Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] hashtag or https://twitter.com/asciidoctor[@asciidoctor] mention + +ifdef::env-github[] +Weitere Informationen und Dokumentation zu Asciidoctor finden Sie auf der Website des Projekts. 
+ +{uri-project}[Home] | {uri-news}[News] | {uri-docs}[Docs] +endif::[] + +Die Asciidoctor-Organisation auf GitHub hostet den Quellcode des Projekts, den Issue Tracker und Unterprojekte. + +Source repository (git):: {uri-repo} +Issue tracker:: {uri-issues} +Asciidoctor Organization auf GitHub:: {uri-org} + +== Lizenz + +Copyright (C) 2012-2019 Dan Allen, Sarah White, Ryan Waldron, und die einzelnen Mitarbeiter von Asciidoctor. +Die Nutzung dieser Software wird unter den Bedingungen der MIT-Lizenz gewährt. + +Siehe die {uri-license}[LIZENZ] für den vollen Lizenztext. + +== Authoren + +*Asciidoctor* wird von https://github.com/mojavelinux[Dan Allen] und https://github.com/graphitefriction[Sarah White] geleitet und hat Beiträge von {uri-contributors}[vielen Personen] in Asciidoctors großartiger Gemeinschaft erhalten. +Das Projekt wurde 2012 von https://github.com/erebor[Ryan Waldron] initiiert und basiert auf einem {uri-prototype}[Prototyp] von https://github.com/nickh[Nick Hengeveld]. + +*AsciiDoc* wurde von Stuart Rackham gegründet und hat Beiträge von vielen Personen aus der AsciiDoc-Community erhalten. + +ifndef::env-site[] +== Changelog + +ifeval::[{safe-mode-level} < 20] +include::CHANGELOG.adoc[tag=compact,leveloffset=+1] +endif::[] + +Eine vollständige Liste der Änderungen in älteren Versionen finden Sie im {uri-changelog}[CHANGELOG]. +endif::[] diff -Nru asciidoctor-1.5.5/README-fr.adoc asciidoctor-2.0.10/README-fr.adoc --- asciidoctor-1.5.5/README-fr.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/README-fr.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,23 +1,29 @@ = Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron -// FIXME use build system to expand includes statically so document renders properly on GitHub -ifeval::[{safe-mode-level} < 20] -include::_settings-README.adoc[] -endif::[] -ifeval::[{safe-mode-level} >= 20] +v2.0.10, 2019-05-31 // settings: -:page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} -ifdef::env-github[:status:] +ifndef::env-github[:icons: font] +ifdef::env-github[] +:status: +:outfilesuffix: .adoc +:caution-caption: :fire: +:important-caption: :exclamation: +:note-caption: :paperclip: +:tip-caption: :bulb: +:warning-caption: :warning: +endif::[] +// Variables: +:release-version: 2.0.10 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js -:uri-project: http://asciidoctor.org +:uri-project: https://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news @@ -32,7 +38,7 @@ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc -:uri-license: {uri-rel-file-base}LICENSE.adoc +:uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor @@ -41,29 +47,31 @@ :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain -:uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx +:uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: 
{uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html -:uri-foundation: http://foundation.zurb.com +:uri-foundation: https://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png -endif::[] -{uri-project}/[Asciidoctor] est un processeur de texte _rapide_ et une chaîne de publication pour convertir du contenu {uri-what-is-asciidoc}[AsciiDoc] en HTML5, DocBook 5 (ou 4.5) et d'autres formats. -Asciidoctor est écrit en Ruby, packagé sous forme de RubyGem et publié sur {uri-rubygem}[RubyGems.org]. -La gemme est aussi incluse dans plusieurs distributions Linux, dont Fedora, Debian et Ubuntu. -Asciidoctor est open source, {uri-repo}[hébergé sur GitHub] et distribué sous {uri-license}[licence MIT]. +{uri-project}/[Asciidoctor] est un processeur de texte et une chaîne de publication _rapide_ et {uri-license}[open source] permettant de convertir du contenu {uri-what-is-asciidoc}[AsciiDoc] en HTML 5, DocBook 5, PDF et d'autres formats. +Asciidoctor est écrit en Ruby et fonctionne sur les principaux systèmes d'exploitation. +Pour simplifier l'installation, Asciidoctor est publié au format gem sur {uri-rubygem}[RubyGems.org], et il est également disponible en tant que paquet système sur les principales distributions Linux ainsi que sur macOS. +Asciidoctor fonctionne aussi sur la JVM avec {uri-asciidoctorj}[AsciidoctorJ] et dans n'importe quel environnement JavaScript avec {uri-asciidoctorjs}[Asciidoctor.js]. +Le projet Asciidoctor est {uri-repo}[hébergé sur GitHub]. ifndef::env-site[] -.Ce document est traduit dans les langues suivantes : -* {uri-rel-file-base}README.adoc[Anglais] -* {uri-rel-file-base}README-zh_CN.adoc[Chinois] -* {uri-rel-file-base}README-jp.adoc[Japonais] +Ce document est traduit dans les langues suivantes : + +{uri-rel-file-base}README.adoc[Anglais] +| +{uri-rel-file-base}README-zh_CN.adoc[Chinois] +| +{uri-rel-file-base}README-jp.adoc[Japonais] endif::[] .Documentation clé @@ -73,57 +81,61 @@ * {uri-docs}/asciidoc-syntax-quick-reference[Syntaxe de Référence AsciiDoc] * {uri-docs}/user-manual[Manuel Utilisateur Asciidoctor] -.Asciidoctor est disponible partout où Ruby est disponible -**** -Vous pouvez exécuter Asciidoctor dans la JVM en utilisant JRuby. -Pour invoquer l'API Asciidoctor directement depuis Java ou d'autres langages de la JVM, utilisez {uri-asciidoctorj}[AsciidoctorJ]. -Des plugins basés sur AsciidoctorJ permettent d'intégrer le processeur Asciidoctor avec Apache Maven, Gradle ou Javadoc. - -Asciidoctor s'exécute également au sein de JavaScript. -Nous utilisons http://opalrb.org[Opal] pour transcrire le code source Ruby en JavaScript afin de produire {uri-asciidoctorjs}[Asciidoctor.js], une version pleinement fonctionnelle d'Asciidoctor qui s'intègre dans tout environnement JavaScript, comme un navigateur web ou Node.js. -Asciidoctor.js est utilisé pour faire fonctionner les extensions AsciiDoc Preview pour Chrome, Atom, Brackets et autres outils web. 
-**** - ifdef::status[] .*Santé du projet* image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] //image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] -image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] +//image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] endif::[] +== Sponsors + +Nous souhaitons exprimer toute notre reconnaissance à nos généreux sponsors, sans qui Asciidoctor ne pourrait pas exister. +Merci à vous pour votre engagement dans l'amélioration de la documentation technique ! +Un apport financier supplémentaire est assuré par https://asciidoctor.org/supporters[la communauté]. + +Vous pouvez aider ce projet en devant un sponsor sur https://opencollective.com/asciidoctor[OpenCollective]. + == En un mot -Asciidoctor lit du contenu écrit en texte brut, comme présenté dans la partie gauche de l'image ci-dessous, et le convertit en HTML5, comme présenté dans la partie droite. -Asciidoctor applique une feuille de style par défaut au document HTML5 afin de fournir une expérience de lecture agréable, clé en main. +Asciidoctor lit du contenu écrit en texte brut, comme présenté dans la partie gauche de l'image ci-dessous, et le convertit en HTML 5, comme présenté dans la partie droite. +Asciidoctor applique une feuille de style par défaut au document HTML 5 afin de fournir une expérience de lecture agréable, clé en main. image::{image-uri-screenshot}[Prévisualisation d'une source AsciiDoc et le rendu HTML correspondant] == Le traitement d'AsciiDoc -Asciidoctor lit et analyse la syntaxe du texte écrit en AsciiDoc afin de créer une représentation, sous forme d'arbre, à partir de laquelle des templates sont appliqués pour produire de l'HTML5, du DocBook 5 (ou 4.5). +Asciidoctor lit et analyse la syntaxe du texte écrit en AsciiDoc afin de créer une représentation, sous forme d'arbre, à partir de laquelle des templates sont appliqués pour produire de l'HTML 5, du DocBook 5 et des pages de man(uel). Vous avez la possibilité d'écrire votre propre convertisseur ou de fournir des templates supportant {uri-tilt}[Tilt] pour personnaliser le résultat généré ou pour produire des formats alternatifs. -NOTE: Asciidoctor est un remplaçant du processeur AsciiDoc original écrit en Python (`asciidoc.py`). -La suite de tests Asciidoctor possède {uri-tests}[> 1,600 tests] pour garantir la compatibilité avec la syntaxe AsciiDoc. +Asciidoctor remplace le processeur AsciiDoc original écrit en Python (`asciidoc.py`). +La suite de tests Asciidoctor possède {uri-tests}[plus de 2,000 tests] afin de garantir la compatibilité avec la syntaxe AsciiDoc. 
En plus de la syntaxe AsciiDoc standard, Asciidoctor reconnaît des balises additionnelles ainsi que des options de formatage, comme les polices d'icônes (par exemple `+icon:fire[]+`) et des éléments d'interface (par exemple `+button:[Enregistrer]+`). -Asciidoctor offre aussi un thème moderne et « responsive » basé sur {uri-foundation}[Foundation] pour styliser le document HTML5 généré. +Asciidoctor offre aussi un thème moderne et « responsive » basé sur {uri-foundation}[Foundation] pour styliser le document HTML 5 généré. + +== Asciidoctor est disponible partout où Ruby est disponible + +Vous pouvez exécuter Asciidoctor dans la JVM en utilisant JRuby. +Pour invoquer l'API Asciidoctor directement depuis Java ou d'autres langages de la JVM, utilisez {uri-asciidoctorj}[AsciidoctorJ]. +Des plugins basés sur AsciidoctorJ permettent d'intégrer le processeur Asciidoctor avec Apache Maven, Gradle ou Javadoc. + +Asciidoctor s'exécute également au sein de JavaScript. +Nous utilisons https://opalrb.com[Opal] pour transcrire le code source Ruby en JavaScript afin de produire {uri-asciidoctorjs}[Asciidoctor.js], une version pleinement fonctionnelle d’Asciidoctor qui s’intègre dans tout environnement JavaScript, comme un navigateur web ou Node.js. +Asciidoctor.js est utilisé pour faire fonctionner les extensions AsciiDoc Preview pour Chrome, Atom, Brackets et autres outils web. == Prérequis -Asciidoctor fonctionne sur Linux, OS X (Mac), Windows et requiert une des implémentations suivantes : +Asciidoctor fonctionne sur Linux, macOS et Windows et requiert une des implémentations suivantes de {uri-ruby}[Ruby] : -* MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) -* JRuby (1.7 dans les modes Ruby 1.8 et 1.9, 9000) -* Rubinius 2.2.x +* CRuby (aka MRI) 2.3 - 2.6 +* JRuby 9.1 - 9.2 +* TruffleRuby (GraalVM) * Opal (JavaScript) -Votre aide est appréciée pour tester Asciidoctor sur l'une de ces plateformes. -Référez-vous au paragraphe <> si vous souhaitez vous impliquer dans ce projet. - [CAUTION] ==== Si vous utilisez un environnement Windows dans une autre langue que l'anglais, vous pourriez tomber sur l'erreur `Encoding::UndefinedConversionError` lors du lancement d'Asciidoctor. @@ -138,15 +150,63 @@ == Installation -Asciidoctor peut être installé en utilisant la commande (a) `gem install`, (b) Bundler ou (c) les gestionnaires de paquets pour les distributions Linux populaires. +Asciidoctor peut être installé en utilisant (a) un gestionnaire de paquets Linux, (b) Homebrew pour macOS, (c) la commande `gem install` (recommandé pour les utilisateurs Windows), (d) l'image officielle Docker, ou (e) Bundler. -TIP: L'avantage d'utiliser le gestionnaire de paquets pour installer la gemme est que l'installation englobe celle des librairies Ruby et RubyGems si elles ne sont pas déjà installés. -L'inconvénient est que le paquet n'est pas forcément mis à jour immédiatement après la mise à disposition de la gemme. -Si vous avez besoin de la dernière version, vous devez passer par la commande `gem`. +L'avantage d'utiliser le gestionnaire de paquets pour installer la gemme est que l'installation englobe celle des librairies Ruby et RubyGems si elles ne sont pas déjà installées. -=== (a) Installation de la gemme +=== (a) Gestionnaires de paquets Linux + +La version installée par votre gestionnaire de paquets peut ne pas correspondre à la dernière version d'Asciidoctor. +Consulter le dépôt de votre distribution Linux pour connaitre la dernière version disponible d'Asciidoctor en fonction de la version de votre distribution. 
+ +* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] +* https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] +* https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] +* https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] +* https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + +Si vous souhaitez installer une version plus récente d'Asciidoctor que celle proposée par votre gestionnaire de paquets, suivre <>. + +==== apk (Alpine Linux) -Ouvrir un terminal et taper (en excluant le `$`) : +Pour installer le paquet sur Alpine Linux, ouvrez un terminal et tapez : + + $ sudo apk add asciidoctor + +==== APT + +Sur Debian et les distributions dérivées de Debian, comme Ubuntu, utilisez APT pour installer Asciidoctor. +Pour installer le paquet, ouvrez un terminal et tapez : + + $ sudo apt-get install -y asciidoctor + +==== DNF + +Sur les distributions Linux qui utilisent des RPM, comme Fedora, CentOS, et RHEL, utilisez le gestionnaire de paquets DNF pour installer Asciidoctor. +Pour installer le paquet, ouvrez un terminal et tapez : + + $ sudo dnf install -y asciidoctor + +=== (b) Homebrew (macOS) + +Vous pouvez utiliser Homebrew, le gestionnaire de paquets sur macOS, pour installer Asciidoctor. +Si vous n'avez pas encore installé Homebrew, suivez les instructions sur https://brew.sh/[brew.sh]. + +Une fois Homebrew installé, vous pouvez installer Asciidoctor. +Ouvrez un terminal et tapez : + + $ brew install asciidoctor + +Homebrew installe la gemme `asciidoctor` dans un répertoire spécifique qui est indépendant des gemmes système. + +[#gem-install] +=== (c) gem install + +Avant d'installer Asciidoctor en utilisant `gem install`, il est recommandé d'utiliser https://rvm.io[RVM] pour installer Ruby dans votre « home » (c'est-à-dire, votre espace utilisateur). +Ensuite, vous pouvez utiliser la commande `gem` pour installer ou mettre à jour la gemme Asciidoctor. +Quand vous utilisez RVM, les gemmes sont installées dans un répertoire isolé du système. + +Ouvrez un terminal et tapez : $ gem install asciidoctor @@ -154,30 +214,21 @@ $ gem install asciidoctor --pre -.Mettre à jour votre installation -[TIP] -==== -Si vous avez une précédente version d'Asciidoctor installée, vous pouvez la mettre à jour en utilisant : +=== (d) Docker - $ gem update asciidoctor +Lire {uri-install-docker}[Installer Asciidoctor en utilisant Docker]. -Si vous installez une nouvelle version de la gemme en utilisant `gem install` au lieu de `gem update`, vous aurez plusieurs versions d'installées. -Si c'est le cas, utilisez la commande gem suivante pour supprimer la vieille version : - - $ gem cleanup asciidoctor -==== - -=== (b) Bundler +=== (e) Bundler . Créez un fichier Gemfile à la racine de votre projet (ou du répertoire courant) . Ajoutez la gemme `asciidoctor` dans votre fichier Gemfile comme ci-dessous : + -[source] +[source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' # ou spécifier la version explicitement -# gem 'asciidoctor', '1.5.4' +# gem 'asciidoctor', '{release-version}' ---- . Sauvegardez le fichier Gemfile @@ -188,68 +239,46 @@ Pour mettre à jour la gemme, spécifiez la nouvelle version dans le fichier Gemfile et exécutez `bundle` à nouveau. Utiliser `bundle update` *n*'est *pas* recommandé car les autres gemmes seront également mises à jour, ce qui n'est pas forcément le résultat voulu. 
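To make the Bundler advice above concrete, here is a minimal Gemfile sketch; the pessimistic version constraint is only an example based on the release documented in this README, not a requirement.

[source,ruby]
----
# Gemfile: pin Asciidoctor so upgrades stay explicit and reproducible.
source 'https://rubygems.org'

# '~> 2.0.10' accepts patch releases of the 2.0 line only; to upgrade,
# edit this constraint and run `bundle` again rather than a blanket `bundle update`.
gem 'asciidoctor', '~> 2.0.10'
----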
-=== (c) Gestionnaire de paquets Linux +== Mise à jour -==== DNF (Fedora 21 ou supérieure) +Si vous avez installé Asciidoctor en utilisant votre gestionnaire de paquets, votre système d'exploitation est surement configuré pour mettre à jour automatiquement les paquets, si tel est le cas vous n'avez pas besoin de mettre à jour manuellement Asciidoctor. -Pour installer la gemme sur Fedora 21 ou supérieure en utilisant dnf, ouvrez un terminal et tapez : +=== apk (Alpine Linux) - $ sudo dnf install -y asciidoctor - -Pour mettre à jour la gemme, utilisez : - - $ sudo dnf update -y asciidoctor +Pour mettre à jour Asciidoctor, tapez : -TIP: Votre système peut être configuré pour mettre à jour automatiquement les paquets rpm, auquel cas aucune action de votre part ne sera nécessaire pour mettre à jour la gemme. - -==== apt-get (Debian, Ubuntu, Mint) + $ sudo apk add -u asciidoctor -Pour installer la gemme sur Debian, Ubuntu ou Mint, ouvrez un terminal et tapez : +=== APT - $ sudo apt-get install -y asciidoctor - -Pour mettre à jour la gemme, utilisez : +Pour mettre à jour Asciidoctor, tapez : $ sudo apt-get upgrade -y asciidoctor -TIP: Votre système peut être configuré pour mettre à jour automatiquement les paquets deb, auquel cas aucune action de votre part ne sera nécessaire pour mettre à jour la gemme. - -La version d'Asciidoctor installé par le gestionnaire de paquets (apt-get) peut ne pas correspondre à la dernière version d'Asciidoctor. -Consultez le dépôt de paquets de votre distribution pour trouver quelle version est disponible par version de distribution. +=== DNF -* https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Paquet asciidoctor par version de Debian] -* http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Paquet asciidoctor par version d'Ubuntu] -* https://community.linuxmint.com/software/view/asciidoctor[Paquet asciidoctor par version de Mint] +Pour mettre à jour Asciidoctor, tapez : -[CAUTION] -==== -Il est déconseillé d'utiliser la commande `gem update` pour mettre à jour la gemme gérée par le gestionnaire de paquets. -Le faire mettrait la système dans un état incohérent car le gestionnaire de paquets ne pourrait plus gérer les fichiers (qui sont installés dans /usr/local). -En résumé, les gemmes du système doivent être gérées seulement par le gestionnaire de paquets. - -Si vous souhaitez utiliser une version d'Asciidoctor qui est plus récente que celle installée par votre gestionnaire de paquets, vous devriez utiliser http://rvm.io[RVM] pour installer Ruby dans votre répertoire personnel (dans votre espace utilisateur). -Vous pouvez alors utiliser la commande `gem` pour installer ou mettre à jour la gemme Asciidoctor. -En utilisant RVM, les gemmes sont installées dans un emplacement isolé du système. -==== + $ sudo dnf update -y asciidoctor -==== apk (Alpine Linux) +=== Homebrew (macOS) -Pour installer la gemme sur Alpine Linux, ouvrez un terminal et tapez : +Pour mettre à jour Asciidoctor, tapez : - $ sudo apk add asciidoctor + $ brew update + $ brew upgrade asciidoctor -Pour mettre à jour la gemme, utilisez : +=== gem install - $ sudo apk add -u asciidoctor +Si vous avez précédemment installé Asciidoctor en utilisant la commande `gem`, vous devez manuellement mettre à jour Asciidoctor quand une nouvelle version est publiée. 
+Vous pouvez mettre à jour Asciidoctor en tapant : -TIP: Votre système peut être configuré pour mettre à jour automatiquement les paquets apk, auquel cas aucune action de votre part ne sera nécessaire pour mettre à jour la gemme. + $ gem install asciidoctor -=== Autres options d'installation +Quand vous installez une nouvelle version en utilisant `gem install`, vous vous retrouvez avec plusieurs versions installées. +Utilisez la commande ci-dessous pour supprimer les anciennes versions : -* {uri-install-docker}[Installation d'Asciidoctor avec Docker] -* {uri-install-osx-doc}[Installation d'Asciidoctor sur Mac OS X] -// pour l'instant, l'entrée suivante est juste une répétition de l'information dans ce README -//* {uri-install-doc}[Installation de l'outillage Asciidoctor] + $ gem cleanup asciidoctor == Utilisation @@ -260,10 +289,10 @@ Vous devriez voir les informations concernant la version d'Asciidoctor et celle de votre environnement Ruby s'afficher dans le terminal. -[.output] +[.output,subs=attributes+] .... -Asciidoctor 1.5.4 [http://asciidoctor.org] -Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) +Asciidoctor {release-version} [https://asciidoctor.org] +Runtime Environment (ruby 2.4.1p111 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor fournit aussi une API. @@ -313,7 +342,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- @@ -321,7 +350,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- @@ -329,7 +358,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- @@ -346,9 +375,8 @@ == Contributions -Dans l'esprit du {uri-freesoftware}[logiciel libre], _tout le monde_ est encouragé à aider en vue d'améliorer le projet. -Si vous découvrez des erreurs ou des oublis dans le code source, la documentation, ou le contenu du site web, s'il vous plaît n'hésitez pas à ouvrir un ticket ou une « pull request » avec un correctif. Les contributeurs et contributrices sont toujours les bienvenus ! +Si vous découvrez des erreurs ou des oublis dans le code source, la documentation, ou le contenu du site web, s'il vous plaît n'hésitez pas à ouvrir un ticket ou une « pull request » avec un correctif. Voici quelques façons de contribuer : @@ -356,8 +384,7 @@ * en rapportant des anomalies, * en suggérant de nouvelles fonctionnalités, * en écrivant ou éditant la documentation, -* en écrivant des spécifications, -* en écrivant du code -- _Aucun patch n'est trop petit_ +* en écrivant du code avec des tests -- _Aucun patch n'est trop petit_ ** corriger une coquille, ** ajouter des commentaires, ** nettoyer des espaces inutiles, @@ -370,13 +397,13 @@ == Être aidé -Le projet Asciidoctor est développé pour vous aider à écrire et publier du contenu. -Mais nous ne pouvons pas le faire sans avoir vos avis ! -Nous vous encourageons à poser vos questions et discuter de n'importe quels aspects du projet sur la liste de discussion, Twitter ou dans le salon de discussion.
- -Mailing list:: {uri-discuss} -Twitter (Chat):: hashtag #asciidoctor ou la mention @asciidoctor -Gitter (Chat):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] +Asciidoctor est développé dans le but de vous aider à écrire et publier du contenu. +Mais nous ne pouvons pas le faire sans vos avis ! +Nous vous encourageons à poser vos questions et à discuter de n'importe quels aspects du projet sur la liste de discussion, Twitter ou dans le salon de discussion. + +Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] +Forum (Nabble):: {uri-discuss} +Twitter:: hashtag https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] ou la mention https://twitter.com/asciidoctor[@asciidoctor] //// IRC (Chat):: {uri-irc}[#asciidoctor] sur FreeNode IRC //// @@ -393,9 +420,9 @@ Gestionnaire de tickets:: {uri-issues} L'organisation Asciidoctor sur GitHub:: {uri-org} -== Copyright et licence +== Licence -Copyright (C) 2012-2016 Dan Allen, Ryan Waldron et le projet Asciidoctor. +Copyright (C) 2012-2019 Dan Allen, Sarah White, Ryan Waldron, et les contributeurs individuels d'Asciidoctor. Une utilisation libre de ce logiciel est autorisée sous les termes de la licence MIT. Consultez le fichier {uri-license}[LICENSE] pour plus de détails. @@ -407,10 +434,12 @@ *AsciiDoc* a été démarré par Stuart Rackham et a reçu de nombreuses contributions de la part de la communauté AsciiDoc. +ifndef::env-site[] == Changelog ifeval::[{safe-mode-level} < 20] -include::CHANGELOG.adoc[tags=compact;parse,leveloffset=+1] +include::CHANGELOG.adoc[tag=compact,leveloffset=+1] endif::[] Référez-vous au fichier {uri-changelog}[CHANGELOG] pour une liste complète des changements des versions précédentes. 
+endif::[] diff -Nru asciidoctor-1.5.5/README-jp.adoc asciidoctor-2.0.10/README-jp.adoc --- asciidoctor-1.5.5/README-jp.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/README-jp.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,18 +1,33 @@ = Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron +v2.0.10, 2019-05-31 // settings: -:page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} -ifdef::env-github[:status:] +ifndef::env-github[:icons: font] +ifdef::env-github[] +:status: +:outfilesuffix: .adoc +:caution-caption: :fire: +:important-caption: :exclamation: +:note-caption: :paperclip: +:tip-caption: :bulb: +:warning-caption: :warning: +endif::[] +// Variables: +:release-version: 2.0.10 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js -:uri-project: http://asciidoctor.org +:uri-gradle-plugin: {uri-org}/asciidoctor-gradle-plugin +:uri-maven-plugin: {uri-org}/asciidoctor-maven-plugin +:uri-asciidoclet: {uri-org}/asciidoclet +:uri-project: https://asciidoctor.org +:uri-gem: https://rubygems.org/gems/asciidoctor ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news @@ -21,13 +36,13 @@ :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: -ifdef::env-site[] +ifdef::env-site,env-yard[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc -:uri-license: {uri-rel-file-base}LICENSE.adoc +:uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor @@ -36,138 +51,225 @@ :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain -:uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx -:uri-render-doc: {uri-docs}/render-documents +:uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos +:uri-convert-doc: {uri-docs}/convert-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html -:uri-foundation: http://foundation.zurb.com +:uri-foundation: https://foundation.zurb.com +:uri-opal: https://opalrb.com :uri-tilt: https://github.com/rtomayko/tilt -:uri-ruby: https://ruby-lang.org +:uri-ruby: https://www.ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png -{uri-project}/[Asciidoctor]は _高速な_ テキストプロセッサで {uri-what-is-asciidoc}[Asciidoc] をHTML5, DocBook 5(4.5)や他のフォーマットに変換するツールチェインを配布しています. -AsciidoctorはRubyで書かれており, RubyGemとしてパッケージされ, {uri-rubygem}[RubyGems.org] で配布されています. -gemはいくつかのLinuxディストリビューション, Fedora, Debian, Ubuntuにも含まれています. -Asciidoctorはオープンソース {uri-repo}[hosted on Github] で {uri-license}[the MIT licence]のもとに配布されます. 
- -ifndef::env-site[] -.Translations of the document are available in the following languages: -* {uri-rel-file-base}README-zh_CN.adoc[汉语] -* {uri-rel-file-base}README.adoc[English] -* {uri-rel-file-base}README-fr.adoc[Français] +{uri-project}[Asciidoctor]は, {uri-what-is-asciidoc}[AsciiDoc] で書かれたコンテンツをHTML5, DocBook, PDFなどのフォーマットに変換する, _高速で_ {uri-license}[オープンソース] のテキストプロセッサおよびパブリッシングツールチェインです. +AsciidoctorはRubyで書かれており, すべての主要オペレーティングシステムで動作します. +Asciidoctorプロジェクトは {uri-repo}[GitHubにホスティング] されています. + +インストールをシンプルにするため, AsciidoctorはRubyGem(gem)パッケージとして, {uri-rubygem}[RubyGems.org] で配布されています. +さらに, Asciidoctorは主要なLinuxディストリビューション用およびmacOS用パッケージとしても配布されています. +AsciidctorはRubyで動作するだけでなく, {uri-asciidoctorj}[AsciidoctorJ]としてJVM上でも動作します. また, {uri-asciidoctorjs}[Asciidoctor.js]としてどのようなJavaScript環境(ブラウザを含む)でも実行できます. + +ifndef::env-site,env-yard[] +このドキュメントには以下の言語版が存在します: + +{uri-rel-file-base}README.adoc[English] +| +{uri-rel-file-base}README-zh_CN.adoc[汉语] +| +{uri-rel-file-base}README-de.adoc[Deutsch] +| +{uri-rel-file-base}README-fr.adoc[Français] endif::[] -.Key documentation +.主なドキュメント [.compact] -* {uri-docs}/what-is-asciidoc[What is Asciidoc?] +* {uri-docs}/what-is-asciidoc[What is AsciiDoc?] * {uri-docs}/asciidoc-writers-guide[AsciiDoc Writer's Guide] -* {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] * {uri-docs}/user-manual[Asciidoctor User Manual] +* {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] -.Rubyの行く先, Asciidoctorの追うところ -**** -AsciidoctorはJRubyを用いてJVM上でも実行できます. -Javaや他のJVM言語からAsciidoctor APIを直接呼び出すには, {uri-asciidoctorj}[AsciidoctorJ] を使ってください. -AsciidoctorJに基づいた, AsciidoctorプロセッサをApache Maven, GradleやJavadocに統合するプラグインがあります. +ifdef::status[] +//.*Project health* +image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={uri-gem}] +image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] +image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI),link=https://travis-ci.org/asciidoctor/asciidoctor] +image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor),link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] +//image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status,link=https://coveralls.io/r/asciidoctor/asciidoctor] +//image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate,link=https://codeclimate.com/github/asciidoctor/asciidoctor] +image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs,link=https://inch-ci.org/github/asciidoctor/asciidoctor] +endif::[] -AsciidoctorはJavaScriptでも実行可能です. -{uri-asciidoctorjs}[Asciidoctor.js], WebブラウザやNode.jsのようなJavaScript環境で動くAsciidoctorの完全機能版, を生成するために, RubyのソースをJavaScriptにトランスパイルするのに http://opalrb.org[Opal]を使います. -Asciidoctor.jsはChrome, Atom, Brackets や他のウェブベースのツールの拡張機能としてAsciiDocのプレビューのために使われます. 
-**** +== スポンサー -ifdef::status[] -.*Project health* -image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] -image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] -//image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] -image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] -image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] +{uri-project}/supporters[スポンサー] のみなさまが, このプロジェクトをサポートし, より良いテクニカルドキュメンテーションの実現にコミットメントをしてくださっていることに感謝します. +スポンサーのみなさま, ありがとうございます! +みなさまの多くのサポートなくしてAsciidoctorは実現不可能です. + +ifndef::env-site,env-yard[] +Asciidoctorの主な資金的サポートは, *Change Maker* である https://opendevise.com[OpenDevise], *Strategy Sponsors* である https://www.khronos.org/[Khronos Group] とLinda Roberts, そして *Pull Request Backers* である Brian Dominick, Guillaume Grossetie, Abel Salgado Romero によって提供されています. +さらに, {uri-project}/supporters[Community Backers] のページに掲載されているサポーターによって追加的な資金が提供されています. endif::[] -== The Big Picture +https://opencollective.com/asciidoctor[OpenCollective] を通じてスポンサーになることにより, このプロジェクトを支援することができます. -Asciidoctorは下図の左側のパネルに示されるように, 平文で書かれた内容を読み, 右のパネルに描かれるようにHTML5に変換します. -Asciidoctorは枠にとらわれない快適なエクスペリエンスのためにデフォルトスタイルシートをHTML5時メントに適用します. +== 全体像 -image::{image-uri-screenshot}[Preview of AsciiDoc source and corresponding rendered HTML] +Asciidoctorは, 下図左側のようなプレーンテキストを読み込んで, 右側のようなHTML5に変換します. +特別な設定をしなくてもきれいな表示が得られるよう, HTML5の出力にはデフォルトのスタイルシートが適用されます. -== AsciiDoc Processing -AsciidoctorはAsciiDoc文法で書かれたテキストを読み込み解釈し, それからHTML5, DocBook5(4.5)やman(ual)を出力するために内蔵コンバータセットにパースツリーを渡します. -生成された出力をカスタマイズ, あるいは追加のフォーマットをつくるためにあなた自身のコンバータを使うことや {uri-tilt}[Tilt]-supported テンプレートを読み込むオプションがあります. +image::{image-uri-screenshot}[AsciiDocソースとレンダリングされたHTMLのプレビュー] -NOTE: AsciidoctorはオリジナルのAsciiDoc Pythonプロセッサ(`asciidoc.py`)の完全互換です. -Asciidoctorテストスイートは {uri-tests}[> 1,600 tests] をAsciiDoc文法との互換性を保証するために有しています. +== AsciiDocの処理 -クラシックなAsciiDoc文法に加えて, Asciidoctorは追加のマークアップとフォントベースのicons(例えば, `+icon:fire[]+`)などのフォーマッティングオプションとUIエレメント(`+button:[Save]+`)を 受け付けます. -AsciidoctorはHTML5出力をスタイルするため, モダンで, {uri-foundation}[Foundation] に基づいたレスポンシブテーマをも提供します. +Asciidoctorは, AsciiDoc文法で書かれたテキストを読み込んでパースします. 次に内蔵コンバータにパースツリーを渡します. これによりHTML5, DocBook 5やman(マニュアルmanページ)が出力されます. +出力をカスタマイズしたりフォーマットを追加したりしたいときは, ユーザ独自のコンバータや {uri-tilt}[Tilt] 対応テンプレートを使用することができます. -== Requirements +AsciidoctorはオリジナルのAsciiDoc Pythonプロセッサ(`asciidoc.py`)に完全互換です. +Asciidoctorのテストスイートには, AsciiDoc文法との互換性を保証するために {uri-tests}[2350個を超えるテスト] が入っています. -AsciidoctorはLinux, OS X (Mac)とWindowsで動き, 下記の {uri-ruby}[Ruby]実装の一つを必要とします. +Asciidoctorでは, AsciiDocの従来の文法のほかに, Asciidoctorで追加されたマークアップとフォーマッティングオプションが使用できます. フォントベースのアイコン (例えば, `+icon:fire[]+`) やUIエレメント(`+button:[Save]+`)がそれにあたります. +またAsciidoctorは, HTML5出力時のスタイルとして {uri-foundation}[Foundation] に基づいたモダンでレスポンシブなテーマも提供します. -* MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) -* JRuby (1.7 in Ruby 1.8 and 1.9 modes, 9000) -* Rubinius 2.2.x +== RubyのあるところAsciidoctorも動く + +AsciidoctorはJRubyを用いてJVM上でも実行できます. 
+Javaや他のJVM言語からAsciidoctor APIを直接呼び出すには, {uri-asciidoctorj}[AsciidoctorJ] を使ってください. +AsciidoctorJを使ったAsciiDocの処理をビルドに直接組み込むビルドツール用プラグインとして, {uri-maven-plugin}[Apache Maven用], {uri-gradle-plugin}[Gradle用], および {uri-asciidoclet}[Javadoc用] が存在します. + +AsciidoctorはJavaScriptでも実行可能です. +Rubyで書かれたソースを {uri-opal}[Opal] を使ってJavaScriptにトランスパイルすることで {uri-asciidoctorjs}[Asciidoctor.js] が作成されています. +Asciidoctor.jsはどんなJavaScript環境(WebブラウザやNode.jsを含む)でも動作する, JavaScript版の完全なAsciidoctorです. +Chrome, Atom, Bracketsやその他のウェブベースのツールで, AsciiDocをプレビューするための拡張機能にAsciidoctor.jsが使われています. + +== 必要条件 + +AsciidoctorはLinux, macOS, およびWindowsで動作し, 下記の {uri-ruby}[Ruby]実装の一つを必要とします. + +* CRuby (aka MRI) 2.3 - 2.6 +* JRuby 9.1 - 9.2 +* TruffleRuby (GraalVM) * Opal (JavaScript) [CAUTION] ==== -もし非英語環境のWindowsを使っているなら, Asciidoctorを起動した時に`Encoding::UndefinedConversionError`に遭遇するでしょう. -これを解決するには使っているコンソールの有効なコードページをUTF-8: +もし非英語環境のWindowsを使っているなら, Asciidoctorを起動した時に `Encoding::UndefinedConversionError` に遭遇するかもしれません. +これを解決するには, 以下のコマンドにより, 使っているコンソールの有効なコードページをUTF-8に変更することを推奨します: chcp 65001 -に変更することを推奨します. 一度この変更をすると, Unicode関連の頭痛の種は消えるでしょう. もしEclipseのようなIDEを使っているなら, 同様にエンコーディングをUTF-8にするのを忘れないでください. -AsciidoctorはUTF-8が使われているところで最高の働きを見せます. +AsciidoctorはUTF-8の環境において最も良好に動作します. ==== -== Installation +== インストール + +Asciidoctorは, (a) 主なLinuxディストリビューションのパッケージマネージャ, (b) macOSのHomebrew, (c) `gem install` コマンド(Windowsユーザに推奨), (d) Asciidoctor Dockerイメージ, あるいは(e) Bundlerを用いてインストールできます. + +Linuxパッケージマネージャを用いてインストールする利点は, もしRubyやRubyGemsライブラリがまだインストールされていなかったら, それらをインストールしてくれることです. + +=== (a) Linuxのパッケージマネージャ + +パッケージマネージャによってインストールされるAsciidoctorは最新バージョンではないかもしれません. +ディストリビューションの各リリースにおいてどのバージョンのAsciidoctorがパッケージされているかを確認するには, パッケージリポジトリを参照してください. + +* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] +* https://www.archlinux.org/packages/?name=asciidoctor[Arch Linux (asciidoctor)] +* https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] +* https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] +* https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] +* https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] + +パッケージマネージャによってインストールされるバージョンよりも新しいAsciidoctorを使用したい場合は, <> を参照してください. + +==== apk (Alpine Linux) + +Alpine Linuxにgemをインストールするには, ターミナルを開き, 以下を入力してください: + + $ sudo apk add asciidoctor + +==== pacman (Arch Linux) + +Archベースのディストリビューションにgemをインストールするには, ターミナルを開き, 以下を入力してください: + + $ sudo pacman -S asciidoctor + +==== APT + +Debian, またはUbuntuなどDebianベースのディストリビューションでは, APTを使ってAsciidoctorをインストールしてください. +Asciidoctorパッケージをインストールするには, ターミナルを開き, 以下を入力してください: + + $ sudo apt-get install -y asciidoctor + +==== DNF + +Fedora, CentOS, RHELなどRPMベースのLinuxディストリビューションでは, DNFパッケージマネージャを使ってAsciidoctorをインストールしてください. +Asciidoctorパッケージをインストールするには, ターミナルを開き, 以下を入力してください: + + $ sudo dnf install -y asciidoctor + +=== (b) Homebrew (macOS) -Asciidoctorは (a) `gem install` コマンド, (b) Bundler あるいは (c) 有名Linuxディストリビューションのパッケージマネージャ を用いてインストールされます. +macOSでは, パッケージマネージャHomebrewを使用してAsciidoctorをインストールすることができます. +Homebrewをお持ちでない場合は, まず https://brew.sh/[brew.sh] の説明に従ってHomebrewをインストールしてください. +Homebrewをインストールできたら, `asciidoctor` gemをインストールすることができます. +ターミナルを開き, 以下を入力してください: -TIP: Linuxパッケージマネージャを用いてインストールすることの利点は, もしRubyやRubyGemsライブラリがまだインストールされていなかったら, それらを処理してくれることです. -欠点はgemのリリース直後にはすぐには有効にならないことです. -もし最新バージョンを使いたければ, 必ず `gem` コマンドを使いましょう. 
+ $ brew install asciidoctor -=== (a) gem install +Homebrewにより, システムレベルのgemとは別の独立したprefixのパスに `asciidoctor` gemがインストールされます. -ターミナルを開, 入力しましょう (先頭の`$`は除く): +=== (c) Windows + +WindowsでAsciidoctorを使う場合は, 簡単な方法が2つあります. + +==== Chocolatey + +すでにお使いのマシンで https://chocolatey.org[chocolatey] を使用しているなら, 以下の方法を使用することができます: + +[source] +---- +choco install ruby +---- + +そのあとは <> に従ってください. + +==== Rubyinstaller + +https://rubyinstaller.org/downloads/[Rubyinstaller] を使用したい場合は, お使いのWindowsのバージョンに適したRubyinstallerをダウンロードしてRubyをインストールしたあと, <> に従ってください. + +[#gem-install] +=== (d) gem install + +Asciidoctorを `gem install` を使ってインストールするのであれば, その前に https://rvm.io[RVM] を使ってhomeディレクトリ(つまりユーザ領域)にRubyをインストールしておくべきです. +そうすれば, `gem` コマンドを使用して安全にAsciidoctor gemのインストールやアップデートができます. +RVMを使用すると, システムから隔離された場所にgemがインストールされます. + +ターミナルを開き, 以下のように入力してください: $ gem install asciidoctor -もし, 先行リリースバージョン(例えばリリース候補版)をインストールしたければ +もし, 先行リリースバージョン(例えばリリース候補版)をインストールしたければ以下のようにします. $ gem install asciidoctor --pre -.アップグレード -[TIP] -==== -もしAsciidoctorの以前のバージョンあインストール済みであれば, 以下によってアップデートできます: - - $ gem update asciidoctor +=== (e) Docker -もし gem update の代わりに `gem install` を使ってgemを新バージョンにした場合, 複数バージョンばインストールされるでしょう. -そのときは, 以下のgemコマンドで古いバージョンを削除しましょう: - - $ gem cleanup asciidoctor -==== +{uri-install-docker}[Installing Asciidoctor using Docker]を参照してください. -=== (b) Bundler +=== (f) Bundler -. プロジェクトフォルダーのルート(かカレントディレクトリ)にGemfileを作成 +. プロジェクトのルートフォルダ(またはカレントディレクトリ)にGemfileを作成 . `asciidoctor` gemをGemfileに以下のように追加: + -[source] +[source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' -# or specify the version explicitly -# gem 'asciidoctor', '1.5.4' +# または明示的にバージョンを指定 +# gem 'asciidoctor', '{release-version}' ---- . Gemfileを保存 @@ -176,111 +278,89 @@ $ bundle gemをアップグレードするには, Gemfileで新バージョンを指定し, `bundle` を再び実行してください. -`bundle update` は他のgemもアップデートするため推奨されて *いない* ので, 思わぬ結果になるかも知れません. - -=== (c) Linux package managers - -==== DNF (Fedora 21 or greater) - -dnfを使いFedora21かそれ以上にインストールするには, ターミナルを開き, 以下を入力してください: +`bundle update` を(gemを指定せずに)行うことは推奨 *されません* . 他のgemもアップデートされて思わぬ結果になるかもしれないためです. - $ sudo dnf install -y asciidoctor +== アップグレード -gemをアップグレードするには: +オペレーティングシステムのパッケージマネージャでAsciidoctorをインストールしたのであれば, おそらくパッケージは自動的にアップデートされるように設定されています. その場合は, gemを手動でアップデートする必要はありません. - $ sudo dnf update -y asciidoctor +=== apk (Alpine Linux) -TIP: お使いのシステムは自動的にrpmパッケージをアップデートするよう設定されているかも知れません.その場合, gemのアップデートのためにあなたがすべきことはありません. +gemをアップグレードするには, 以下を使用してください: -==== apt-get (Debian, Ubuntu, Mint) + $ sudo apk add -u asciidoctor -Debian, UbuntuまたはMintにインストールするには, ターミナルを開き, 以下を入力してください: +=== APT - $ sudo apt-get install -y asciidoctor +gemをアップグレードするには, 以下を使用してください: -gemをアップグレードするには: - $ sudo apt-get upgrade -y asciidoctor -TIP: お使いのシステムは自動的にdebパッケージをアップデートするよう設定されているかも知れません.その場合, gemのアップデートのためにあなたがすべきことはありません. +=== DNF -パッケージマネージャ(apt-get)によってインストールされたバージョンのAsciidoctorは最新リリースのAsciidoctorではないかもしれません. -ディストリビューションのリリース毎に, どのバージョンがパッケージされているかはパッケージリポジトリを調べてください. +gemをアップグレードするには, 以下を使用してください: -* https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[asciidoctor package by Debian release] -* http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[asciidoctor package by Ubuntu release] -* https://community.linuxmint.com/software/view/asciidoctor[asciidoctor package by Mint release] + $ sudo dnf update -y asciidoctor -[CAUTION] -==== -パッケージマネージャによって管理されているgemをアップデートするのに `gem udpate` コマンドを使うなといわれるでしょう. 
-そのようなことをするのは, パッケージマネージャがファイル(/usr/local下にインストールされた)を追跡できなくなるためにシステムが不安定な状態にするためです. -単純に, システムgemはパッケージマネージャによってのみ管理されるべきです. - -もし, パッケージマネージャによってインストールされたのより新しいバージョンのAsciidoctorを使いたければ, http://rvm.io[RVM] や https://github.com/rbenv/rbenv[rbenv]を使ってRubyをホームディレクトリ(すなわち, ユーザースペース)にインストールするべきです. -それから, 安心して `gem` コマンドをAsciidoctorのアップデート, インストールのために使うことができます. -RVMやrbenvを使っているなら, gemはシステムからは孤立した場所にインストールされます. -==== +=== Homebrew (macOS) -==== apk (Alpine Linux) +gemをアップグレードするには, 以下を使用してください: -Alpine Linuxにgemをインストールするには, ターミナルを開き, 以下を入力してください: + $ brew update + $ brew upgrade asciidoctor - $ sudo apk add asciidoctor +=== gem install -gemをアップグレードするには: +`gem` コマンドを使ってAsciidoctorをインストールした場合は, 新しいバージョンのAsciidoctorがリリースされたら手動でアップグレードする必要があります. +以下を入力することでアップグレードできます: - $ sudo apk add -u asciidoctor - -TIP: お使いのシステムは自動的にapkパッケージをアップデートするよう設定されているかも知れません.その場合, gemのアップデートのためにあなたがすべきことはありません. + $ gem install asciidoctor -=== Other installation options +`gem install` を使って新しいバージョンのgemをインストールすると, 複数のバージョンがインストールされた状態になります. +以下のコマンドを使って古いバージョンを削除してください. -* {uri-install-docker}[Installing Asciidoctor using Docker] -* {uri-install-osx-doc}[Installing Asciidoctor on Mac OS X] -// at the moment, the following entry is just a reiteration of the information in this README -//* {uri-install-doc}[Installing the Asciidoctor toolchain] + $ gem cleanup asciidoctor -== Usage +== 使い方 -Asciidoctorのインストールに成功すれば, `asciidoctor` コマンドラインインターフェース(CLI)がPATH中で有効になります. -確認のために, 以下をターミナルで実行しましょう: +Asciidoctorのインストールが成功すると, `asciidoctor` コマンドがPATHに存在するようになり, Asciidoctorのコマンドラインインターフェース(CLI)が使用できるようになります. +確認のために, ターミナルで以下を実行しましょう: $ asciidoctor --version -AsciidoctorのバージョンとRuby環境についての情報がターミナルに出力されたのを見ることができるはずです. +AsciidoctorのバージョンとRuby環境についての情報がターミナルに出力されるはずです. -[.output] +[.output,subs=attributes+] .... -Asciidoctor 1.5.4 [http://asciidoctor.org] -Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) +Asciidoctor {release-version} [https://asciidoctor.org] +Runtime Environment (ruby 2.6.0p0 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... -AsciidoctorはAPIを提供します. -APIは他のRubyソフトウェア, Rails, SinatraとGitHub, そして他の言語, Java (via {uri-asciidoctorj}[AsciidoctorJ] )とJavaScript (via {uri-asciidoctorjs}[Asciidoctor.js])との統合を意図しています. +AsciidoctorはAPIも提供します. +APIは他のRubyソフトウェア, たとえばRails, Sinatra, GitHub, そして他の言語, たとえばJava ({uri-asciidoctorj}[AsciidoctorJ] 経由)やJavaScript ({uri-asciidoctorjs}[Asciidoctor.js] 経由)と組み合わせて使用するためのものです. -=== Command line interface (CLI) +=== コマンドラインインターフェース (CLI) -`asciidoctorjs` コマンドはAsciidoctorをコマンドライン(つまりターミナル)から起動することを可能にします. +`asciidoctor` コマンドによりコマンドライン(つまりターミナル)からAsciidoctorを起動することができます. -次のコマンドはファイルREADME.adocをHTMLに変換し, 結果を同じディレクトリのREADME.htmlに保存します. -生成されたHTMLファイルの名前はソースファイル依存し, その拡張子を `.html` に変えます. +次のコマンドにより, README.adocというファイルがHTMLに変換され, 結果が同じディレクトリのREADME.htmlとして保存されます. +生成されるHTMLファイルの名前は, ソースファイルのファイル名の拡張子を `.html` に替えたものとなります. $ asciidoctor README.adoc -Asciidoctorプロセッサに様々なフラグやスイッチを与えることで制御できます.それは以下を用いて調べることができます: +さまざまなフラグやスイッチを与えることでAsciidoctorプロセッサをコントロールすることができます. フラグやスイッチの説明は以下のコマンドで表示されます: $ asciidoctor --help -例えば, ファイルを異なるディレクトリに書き出すには: +例えば, ファイルを異なるディレクトリに書き出すには以下を使用します: $ asciidoctor -D output README.adoc -`asciidoctor` {uri-manpage}[man page] はコマンドライン・インタフェースの完全なリファレンスを提供します. +コマンドラインインタフェースの完全なリファレンスは `asciidoctor` の {uri-manpage}[manページ] にあります. -`asciidoctor` コマンドの使い方についてもっと学ぶには以下を参照してください. +`asciidoctor` コマンドの使い方の詳細については以下を参照してください. -* {uri-render-doc}[How do I convert a document?] +* {uri-convert-doc}[How do I convert a document?] 
* {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] === Ruby API @@ -290,106 +370,114 @@ [source] require 'asciidoctor' -それから, AsciiDocソースファイルをHTMLファイルに変換できます: +そうすると, 以下のようにしてAsciiDocソースファイルをHTMLファイルに変換できます: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe -WARNING: AsciidoctorをAPI経由で使っている時, デフォルトのセーフモードは `:secure` です. -セキュアモードでは, `include` ディレクティブを含むいくつかのコア機能は無効化されています. -もしこれらの機能を有効化したい場合, 明示的にセーフモードを `:server` (推奨)か `:safe` にする必要があります. +WARNING: AsciidoctorをAPI経由で使っているとき, デフォルトのセーフモードは `:secure` (セキュアモード)です. +セキュアモードでは, `include` ディレクティブを含むいくつかのコア機能が無効化されています. +これらの機能を有効化したい場合, 明示的にセーフモードを `:server` (推奨)か `:safe` にする必要があります. -AsciiDoc文字列を埋め込みHTML(HTMLページヘの挿入)へ変換することもできます: +AsciiDoc文字列を, 埋め込み用HTML(HTMLページヘの挿入用)に変換することもできます: [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- -もし完全なHTMLドキュメントを求めるのであれば, `header_footer` オプションを以下の通り有効にしてください: +もし完全なHTMLドキュメントが必要であれば, 以下のように `header_footer` オプションを有効にしてください: [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- -パースされたドキュメントにアクセスしたいのなら, 変換を個々のステップに分割することが出来ます: +パースされたドキュメントにアクセスしたい場合は, 変換を複数のステップに分割します: [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- -Asciidoctorの生成する出力が気に入らないのであれば, _あなたはそれを変更できる_ ことを忘れないでください! -Asciidoctorはパースされたドキュメントを生成された出力に変換する処理を扱うカスタムコンバーターをサポートしています. +Asciidoctorの生成する出力が気に入らない場合は, _あなたはそれを変更できる_ ことを忘れないでください! +パースされたドキュメントを出力形式に変換するコンバータは, カスタマイズが可能です. -断片的な出力をカスタマイズする簡単な方法の一つはテンプレートコンバーターを使うことです. -テンプレートコンバーターによって, ドキュメント中のあらゆるノードの変換を扱うために {uri-tilt}[Tilt]-supportedテンプレートファイルを使うことができます. +出力を部分的にカスタマイズする簡単な方法としてはテンプレートコンバータがあります. +テンプレートコンバータでは, ドキュメントの各ノードの変換に {uri-tilt}[Tilt]対応テンプレートファイルを使うことができます. -そのようにすれば, 出力を100%制御することが _できます_ . -APIの使い方や出力のカスタマイズ方法についてのより詳しい情報は {uri-user-manual}[user manual] を参照してください. +さまざまな方法を使って出力は100%制御することが _できます_ . +APIの使い方や出力のカスタマイズ方法についてのより詳しい情報は {uri-user-manual}[ユーザマニュアル] を参照してください. -== Contributing +== コントリビューション -{uri-freesoftware}[free software] の精神においては, _everyone_ がこのプロジェクトを改良するのをたすけることが勧められている. -もしエラーや手抜かりをソースコード, ドキュメント, あるいはウェブサイトに見つけたのなら, 恥じることなく修正と共にpull requestの開設やissueの送信をしてください. -New contributors are always welcome! +新しいコントリビューションを常に歓迎します! +もしソースコード, ドキュメント, あるいはウェブサイトに間違いや不備を見つけたら遠慮なく, イシューを作成するか, 修正をおこなってpull requestを作成してください. -*あなた* にもできることがあります: +*あなた* にもできることがあります: -* 先行バージョン(alpha, beta or preview)の使用 +* 先行バージョン(alpha, beta, またはpreview版)の使用 * バグレポート * 新機能提案 -* ドキュメントの執筆 -* 仕様の執筆 -* コーディング -- _パッチでも, 足りなすぎるなんてことはありません_ +* ドキュメントの執筆または編集 +* テストをつけてコードを書くこと -- _どのようなパッチであれ小さすぎるなどということはありません_ ** typoの修正 ** コメントの追加 ** 一貫性のないホワイトスペースの除去 ** テストの記述! * リファクタリング -* {uri-issues}[issues] の修正 -* パッチの批評 +* {uri-issues}[イシュー] の解決 +* パッチのレビュー -{uri-contribute}[Contributing] ガイドはどうやってスタイルをつくるか, issueを送るか, 機能リクエスト, コーディング, ドキュメンテーションをAsciidoctor Projectにするかの情報を提供しています. +Asciidoctorプロジェクトにイシュー, 機能リクエスト, コード, ドキュメントを送る際の, 作成方法, スタイル, および送り方は, {uri-contribute}[Contributing] ガイドに記載されています. 
-== Getting Help +== 助けを得る -Asciidoctorプロジェクトはあなたが簡単に著作を書いて, 配布するのをたすけるため開発されています. -しかしあなたのフィードバックなしにはできません! -ディスカッションリストで, Twitterで, チャットルームで, 質問し, プロジェクトのあらゆる側面について話し合うようお勧めします. +Asciidoctorは, コンテンツの執筆と公開を簡単にするために開発されています. +しかしあなたからのフィードバックがなくてはAsciidoctorの開発は進みません! +ディスカッションリスト, Twitter, チャットルームを使って, 質問をしたりプロジェクトのさまざまな側面について話し合ったりすることをお勧めします. -Discussion list (Nabble):: {uri-discuss} -Twitter:: #asciidoctor hashtag or @asciidoctor mention -Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] +チャット(Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] +ディスカッションリスト(Nabble):: {uri-discuss} +Twitter:: ハッシュタグ https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] またはメンション https://twitter.com/asciidoctor[@asciidoctor] ifdef::env-github[] -Further information and documentation about Asciidoctor can be found on the project's website. +以下のプロジェクトサイトに, Asciidoctorに関するさらに詳しい情報やドキュメントがあります. -{uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] +{uri-project}[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] -GitHub上のAsciidoctorはプロジェクトのソースコード, イシュートラッカー, サブプロジェクトを管理しています. +GitHub上のAsciidoctorのorganizationではプロジェクトのソースコード, イシュートラッカー, サブプロジェクトが管理されています. + +ソースリポジトリ(git):: {uri-repo} +イシュートラッカー:: {uri-issues} +GitHub上のAsciidoctorのorganization:: {uri-org} -Source repository (git):: {uri-repo} -Issue tracker:: {uri-issues} -Asciidoctor organization on GitHub:: {uri-org} +== ライセンス -== Copyright and Licensing +Copyright (C) 2012-2019 Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. +本ソフトウェアはMITライセンスのもとで使用できます. -Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. -Free use of this software is granted under the terms of the MIT License. +ライセンスの詳細については {uri-license}[LICENSE] ファイルを参照してください. -See the {uri-license}[LICENSE] file for details. +== 作者 -== Authors +*Asciidoctor* は https://github.com/mojavelinux[Dan Allen] と https://github.com/graphitefriction[Sarah White] がリードし, Asciidoctorの素晴らしきコミュニティの {uri-contributors}[数多くのメンバ] からコントリビューションを受けてきました. +このプロジェクトは https://github.com/nickh[Nick Hengeveld] の {uri-prototype}[プロトタイプ] をベースに https://github.com/erebor[Ryan Waldron] により2012年から創始されました. -*Asciidoctor* is led by https://github.com/mojavelinux[Dan Allen] and https://github.com/graphitefriction[Sarah White] and has received contributions from {uri-contributors}[many other individuals] in Asciidoctor's awesome community. -The project was initiated in 2012 by https://github.com/erebor[Ryan Waldron] and based on {uri-prototype}[a prototype] written by https://github.com/nickh[Nick Hengeveld]. +*AsciiDoc* は Stuart Rackham により創始され, AsciiDocコミュニティの数多くのメンバからコントリビューションを受けてきました. -*AsciiDoc* was started by Stuart Rackham and has received contributions from many other individuals in the AsciiDoc community. +ifndef::env-site[] +== 変更履歴 + +ifeval::[{safe-mode-level} < 20] +include::CHANGELOG.adoc[tag=compact,leveloffset=+1] +endif::[] + +過去のリリースの完全な変更点リストについては {uri-changelog}[CHANGELOG] を参照してください. 
+endif::[] diff -Nru asciidoctor-1.5.5/README-zh_CN.adoc asciidoctor-2.0.10/README-zh_CN.adoc --- asciidoctor-1.5.5/README-zh_CN.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/README-zh_CN.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,18 +1,30 @@ = Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron +v2.0.10, 2019-05-31 // settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} -ifdef::env-github[:status:] +ifndef::env-github[:icons: font] +ifdef::env-github[] +:status: +:outfilesuffix: .adoc +:caution-caption: :fire: +:important-caption: :exclamation: +:note-caption: :paperclip: +:tip-caption: :bulb: +:warning-caption: :warning: +endif::[] +// Variables: +:release-version: 2.0.10 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js -:uri-project: http://asciidoctor.org +:uri-project: https://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news @@ -27,7 +39,7 @@ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc -:uri-license: {uri-rel-file-base}LICENSE.adoc +:uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor @@ -42,100 +54,104 @@ :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html -:uri-foundation: http://foundation.zurb.com +:uri-foundation: https://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png -{uri-project}/[Asciidoctor] 是一个 _快速_ 文本处理器和发布工具链,它可以将 {uri-what-is-asciidoc}[AsciiDoc] 文档转化成 HTML5、 DocBook 5 (或 4.5) 以及其他格式。 +{uri-project}/[Asciidoctor] 是一个 _快速_ 文本处理器和发布工具链,它可以将 {uri-what-is-asciidoc}[AsciiDoc] 文档转化成 HTML 5、 DocBook 5 以及其他格式。 Asciidoctor 由 Ruby 编写,打包成 RubyGem,然后发布到 {uri-rubygem}[RubyGems.org] 上。 这个 gem 还被包含道几个 Linux 发行版中,其中包括 Fedora、Debian 和 Ubuntu。 -Asciidoctor 是开源的,{uri-repo}[代码托管在 GitHub],并且是以 {uri-license}[MIT 协议]授权。 +Asciidoctor 是开源的,{uri-repo}[代码]托管在 GitHub,遵从 {uri-license}[MIT] 协议。 + +该文档有如下语言的翻译版: -.该文档有如下语言的翻译版: * {uri-rel-file-base}README.adoc[English] * {uri-rel-file-base}README-fr.adoc[Français] +* {uri-rel-file-base}README-jp.adoc[日本語] .关键文档 [.compact] -* {uri-docs}/what-is-asciidoc[什么是 Asciidoctor?] -* {uri-docs}/asciidoc-writers-guide[AsciiDoc 作家指南] +* {uri-docs}/what-is-asciidoc[Asciidoctor 是什么?] 
+* {uri-docs}/asciidoc-writers-guide[AsciiDoc 写作指南] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc 语法快速参考] * {uri-docs}/user-manual[Asciidoctor 用户手册] .Ruby 所至, Asciidoctor 相随 **** -使用 JRuby 可以让 Asciidoctor 运行在 Java 虚拟机上。 -使用 {uri-asciidoctorj}[AsciidoctorJ] 就可以让 Java 或者其他 Java 虚拟机语言直接调用 Asciidoctor API。 -基于 AsciidoctorJ 有好多好多插件可用,这些插件可以将 Asciidoctor 整合到 Apache Maven,Gradle 或 Javadoc 构建中。 +使用 JRuby 让 Asciidoctor 运行在 Java 虚拟机上。 +使用 {uri-asciidoctorj}[AsciidoctorJ] 直接调用 Asciidoctor 的 API 运行在 Java 或者其他 Java 虚拟机中。 +基于 AsciidoctorJ 有好多插件可用,这些插件可以将 Asciidoctor 整合到 Apache Maven,Gradle 或 Javadoc 构建中。 Asciidoctor 也可以运行在 JavaScript 上。 -我们可以使用 http://opalrb.org[Opal] 将 Ruby 源码编译成 JavaScript 并生成 {uri-asciidoctorjs}[Asciidoctor.js],这是一个全功能版的 Asciidoctor,可以运行在任意的 JavaScript 环境中,比如 Web 浏览器 或 Node.js。 -Asciidoctor.js 被用于 AsciiDoc 预览,支持 Chrome 扩展,Atom,Brackets 或其他基于 Web 的工具。 +我们可以使用 https://opalrb.com[Opal] 将 Ruby 源码编译成 JavaScript 生成 {uri-asciidoctorjs}[Asciidoctor.js] 文件,这是一个全功能版的 Asciidoctor,可以运行在任意的 JavaScript 环境中,比如 Web 浏览器 或 Node.js。 +Asciidoctor.js 被用于预览 AsciiDoc,支持 Chrome 扩展,Atom,Brackets 或其他基于 Web 的工具。 **** -ifdef::badges[] +ifdef::status[] .*Project health* image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] //image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] -image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] +//image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] endif::[] [#the-big-picture] -== 全局概况 +== 整体概况 -Asciidoctor 以纯文本格式读取内容,见下图左边的面板,将它转换成 HTML5 呈现在右侧面板中。 -Asciidoctor 将默认的样式表应用到 HTML5 文档上,提供一个愉快的开箱即用的体验。 +Asciidoctor 以纯文本格式读取内容,见下图左边的面板,并将它转换成 HTML 5 呈现在右侧面板中。 +Asciidoctor 将默认的样式表应用到 HTML 5 文档上,提供一个愉快的开箱即用的体验。 image::{image-uri-screenshot}[AsciiDoc 源文预览和相应的 HTML 渲染] [#asciidoc-processing] == AsciiDoc Processing -Asciidoctor 读取并处理以 AsciiDoc 语法写作的文件,然后然后将解析出来的解析树交给内置的转化器生成 HTML5,DocBook 5 (或 4.5) 或帮助手册页面输出。 +Asciidoctor 会读取并处理用 AsciiDoc 语法写的文件,然后将解析出来的解析树参数交给内置的转化器去生成 HTML 5,DocBook 5 或帮助手册页面输出。 你可以选择使用你自己的转化器或者加载 {uri-tilt}[Tilt] - 支持通过模板来自定义输出或产生附加的格式。 NOTE: Asciidoctor是为了直接替换原 AsciiDoc Python 处理器(`asciidoc.py`)。 -Asciidoctor 测试套件含有 {uri-tests}[> 1,600 测试用例] 来确保和 AsciiDoc 语法的兼容性。 +Asciidoctor 测试套件含有 {uri-tests}[> 1,600 测试示例] 来确保和 AsciiDoc 语法的兼容性。 -除了经典的 AsciiDoc 语法,Asciidoctor 还添加额外的标记和格式设置选项,例如 font-based 图标(例如: `+icon:fire[]+`)和 UI 元素(例如: `+button:[Save]+`)。 -Asciidoctor 还提供了一个基于 {uri-foundation}[Foundation] 的现代的、响应式主题来美化 HTML5 输出。 +除了传统的 AsciiDoc 语法,Asciidoctor 还添加额外的标记和格式设置选项,例如 font-based 图标(例如: `+icon:fire[]+`)和 UI 元素(例如: `+button:[Save]+`)。 +Asciidoctor 还提供了一个基于 {uri-foundation}[Foundation] 的现代化的、响应式主题来美化 HTML 5 输出。 [#requirements] == 要求 -Asciidoctor 可以在 Linux,OSX (Mac) 和 Windows,并且需要下面其中一个 {uri-ruby}[Ruby] 实现: +Asciidoctor 可以运行在 Linux,OSX (Mac) 和 Windows 系统,但需要安装下面任意一个 {uri-ruby}[Ruby] 环境去实现: -* 
MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) -* JRuby (1.7 in Ruby 1.8 and 1.9 modes, 9000) -* Rubinius 2.2.x +* CRuby (aka MRI) 2.3 - 2.6 +* JRuby 9.1 - 9.2 +* TruffleRuby (GraalVM) * Opal (JavaScript) 我们欢迎你来帮助在这些以及其他平台测试 Asciidoctor。 -参考 <<{idprefix}contributing,Contributing>> 来学习如何参与进来。 + +请参考 <<{idprefix}contributing,Contributing>> 来了解如何参与。 [CAUTION] ==== -如果你使用一个非英语的 Windows 环境,当调用 Asciidoctor 时,可能会碰到 `Encoding::UndefinedConversionError` 错误。 +如果在非英语的 Windows 环境,当你去调用 Asciidoctor 时,可能会碰到 `Encoding::UndefinedConversionError` 的错误提示。 为了解决这个问题,我们建议将控制台的编码更改为 UTF-8: chcp 65001 一旦你做了这个改变,所有的编码问题,都将迎刃而解。 -只要你在任何地方都是 UTF-8,Asciidoctor 总会工作地很好。 +如果你使用的是像 Eclipse 这样的 IDE 集成开发工具,你也需要确保他被你设置为 UTF-8 编码。 +使用 UTF-8 能使 Asciidoctor 在任何地方都能正常工作。 ==== [#installation] == 安装 -Asciidoctor 可以通过三种方式安装:(a) 使用 `gem install` 命令;(b) 使用 Bundler;(c) 流行的 Linux 发行版的包管理器 +Asciidoctor 可以通过三种方式安装(a)`gem install` 命令;(b)Bundler打包编译;(c)流行的 Linux 发行版的包管理器 -TIP: 使用 Linux 包管理器安装的好处是如果 Ruby 和 RubyGems 库没有在你的机器上安装,它会一并安装上去。 +TIP: 使用 Linux 包管理器安装的好处是如果你机器在之前没有安装 Ruby 和 RubyGems 库,当你选择这种方式安装时它们会一并安装上去。 不利的是在 gem 发布之后,这类安装包并不是立即可用。 -如果你需要最新版,你应该总是优先使用 `gem` 命令安装。 +如果你需要安装最新版,你应该总是优先使用 `gem` 命令安装。 [#a-gem-install] === (a) gem 安装 @@ -151,12 +167,12 @@ .升级 [TIP] ==== -如果你安装有一个旧版本的 Asciidoctor,你可以使用下面的命令来升级: +如果你安装有的是旧版本 Asciidoctor,你可以使用下面的命令来升级: $ gem update asciidoctor -如果使用 `gem install` 命令来安装一个新版本的 gem 来代替升级,则会安装多个版本。 -如果是这种情况,使用下面的 gem 命令来移除旧版本: +如果使用 `gem install` 命令来安装一个新版本的 gem 来代替升级,会安装多个版本。 +这种情况,你可以使用下面的 gem 命令来移除旧版本: $ gem cleanup asciidoctor ==== @@ -167,12 +183,12 @@ . 在项目的根目录(或者当前路径),创建一个 `Gemfile` 文件; . 在这个文件中添加 `asciidoctor` gem 如下: + -[source] +[source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' # 或者明确指明版本 -# gem 'asciidoctor', '1.5.4' +# gem 'asciidoctor', '{release-version}' ---- . 保存 `Gemfile` 文件 @@ -216,7 +232,7 @@ 请查看发行版的包库,来确定每个发行版是打包的哪个版本。 * https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Debian 发行版中的 asciidoctor] -* http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Ubuntu 发行版中的 asciidoctor] +* https://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Ubuntu 发行版中的 asciidoctor] * https://community.linuxmint.com/software/view/asciidoctor[Mint 发行版中的 asciidoctor] [CAUTION] @@ -225,7 +241,7 @@ 这样做会使系统进入不一致的状态,包管理工具将不再跟踪相关文件(通常安装在 /usr/local 下。) 简单地说,系统的 gem 只能由包管理器进行管理。 -如果你想使用一个比包管理器安装的更新版本的 Asciidoctor,你应该使用 http://rvm.io[RVM] 在你的用户家目录(比如:用户空间)下安装 Ruby。 +如果你想使用一个比包管理器安装的更新版本的 Asciidoctor,你应该使用 https://rvm.io[RVM] 在你的用户家目录(比如:用户空间)下安装 Ruby。 然后,你就可以放心地使用 `gem` 命令来安装或者更新 Asciidoctor gem。 当使用 RVM 时,gem 将被安装到与系统隔离的位置。 ==== @@ -259,14 +275,14 @@ 你应该看到关于 Asciidoctor 和 Ruby 环境信息将打印到你的终端上。 -[.output] +[.output,subs=attributes+] .... -Asciidoctor 1.5.4 [http://asciidoctor.org] -Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) +Asciidoctor {release-version} [https://asciidoctor.org] +Runtime Environment (ruby 2.4.1p111 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... 
Asciidoctor 还提供了一套 API。 -这套 API 是为了整合其他的 Ruby 软件,例如 Rails、Sinatra、Github,甚至其他语言,比如 Java (通过 {uri-asciidoctorj}[AsciidoctorJ]) 和 JavaScript (通过 {uri-asciidoctorjs}[Asciidoctor.js])。 +这套 API 是为了整合其他的 Ruby 软件,例如 Rails、Sinatra、GitHub,甚至其他语言,比如 Java (通过 {uri-asciidoctorj}[AsciidoctorJ]) 和 JavaScript (通过 {uri-asciidoctorjs}[Asciidoctor.js])。 [#command-line-interface-cli] === 命令行(CLI) @@ -314,7 +330,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- @@ -322,7 +338,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- @@ -330,7 +346,7 @@ [source] ---- -content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' +content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert @@ -365,10 +381,10 @@ ** 清理多余空白 ** 编写测试! * 重构代码 -* 修复 {uri-issues}[议题] +* 修复 {uri-issues}[issues] * 审查补丁 -{uri-contribute}[贡献] 指南提供了如何提供贡献,包括如何创建、修饰和提交问题、特性、需求、代码和文档给 Asciidoctor 项目。 +{uri-contribute}[贡献指南]提供了如何提供贡献,包括如何创建、修饰和提交问题、特性、需求、代码和文档给 Asciidoctor 项目。 [#getting-help] == 获得帮助 @@ -378,7 +394,7 @@ 我们鼓励你在讨论组、Twitter或聊天室里,提问为题,讨论项目的方方面面, 讨论组 (Nabble):: {uri-discuss} -Twitter:: #asciidoctor 井号或 @asciidoctor 提醒 +Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] 来加入话题 或 https://twitter.com/asciidoctor[@asciidoctor] at并提醒我们 聊天 (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] ifdef::env-github[] @@ -387,7 +403,7 @@ {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] -Asciidoctor 组织在 Github 托管代码、议案跟踪和相关子项目。 +Asciidoctor 组织在 GitHub 托管代码、议案跟踪和相关子项目。 代码库 (git):: {uri-repo} 议案跟踪:: {uri-issues} @@ -396,7 +412,7 @@ [#copyright-and-licensing] == 版权和协议 -Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. +Copyright (C) 2012-2019 Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. 这个软件的免费使用是在MIT许可条款授予的。 请看 {uri-license}[版权声明] 文件来获取更多详细信息。 diff -Nru asciidoctor-1.5.5/run-tests.sh asciidoctor-2.0.10/run-tests.sh --- asciidoctor-1.5.5/run-tests.sh 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/run-tests.sh 2019-08-18 16:11:54.000000000 +0000 @@ -4,7 +4,9 @@ # This script will execute against all supported Ruby versions if "all" is the first argument to the script. 
if [ "$1" = "all" ]; then - rvm 1.8@asciidoctor-dev,jruby@asciidoctor-dev,rbx@asciidoctor-dev,1.9@asciidoctor-dev,2.0@asciidoctor-dev,2.1@asciidoctor-dev "do" ./run-tests.sh + rvm 2.3,2.6,jruby-9.2 "do" ./run-tests.sh else - rake > /tmp/asciidoctor-test-results.txt 2>&1; cat /tmp/asciidoctor-test-results.txt + GEM_PATH=$(bundle exec ruby -e "puts ENV['GEM_HOME']") + CONSOLE_OUTPUT=$(rake test:all 2>&1) + echo "$CONSOLE_OUTPUT" fi diff -Nru asciidoctor-1.5.5/_settings-README.adoc asciidoctor-2.0.10/_settings-README.adoc --- asciidoctor-1.5.5/_settings-README.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/_settings-README.adoc 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -// settings: -:page-layout: base -:idprefix: -:idseparator: - -:source-language: ruby -:language: {source-language} -ifdef::env-github[:badges:] -// URIs: -:uri-org: https://github.com/asciidoctor -:uri-repo: {uri-org}/asciidoctor -:uri-asciidoctorj: {uri-org}/asciidoctorj -:uri-asciidoctorjs: {uri-org}/asciidoctor.js -:uri-project: http://asciidoctor.org -ifdef::env-site[:uri-project: link:] -:uri-docs: {uri-project}/docs -:uri-news: {uri-project}/news -:uri-manpage: {uri-project}/man/asciidoctor -:uri-issues: {uri-repo}/issues -:uri-contributors: {uri-repo}/graphs/contributors -:uri-rel-file-base: link: -:uri-rel-tree-base: link: -ifdef::env-site[] -:uri-rel-file-base: {uri-repo}/blob/master/ -:uri-rel-tree-base: {uri-repo}/tree/master/ -endif::[] -:uri-changelog: {uri-rel-file-base}CHANGELOG.adoc -:uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc -:uri-license: {uri-rel-file-base}LICENSE.adoc -:uri-tests: {uri-rel-tree-base}test -:uri-discuss: http://discuss.asciidoctor.org -:uri-irc: irc://irc.freenode.org/#asciidoctor -:uri-rubygem: https://rubygems.org/gems/asciidoctor -:uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc -:uri-user-manual: {uri-docs}/user-manual -:uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor -//:uri-install-doc: {uri-docs}/install-toolchain -:uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx -:uri-render-doc: {uri-docs}/render-documents -:uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory -:uri-gitscm-repo: https://github.com/git/git-scm.com -:uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb -:uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html -:uri-foundation: http://foundation.zurb.com -:uri-tilt: https://github.com/rtomayko/tilt -:uri-ruby: https://ruby-lang.org -// images: -:image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png diff -Nru asciidoctor-1.5.5/tasks/bundler.rake asciidoctor-2.0.10/tasks/bundler.rake --- asciidoctor-1.5.5/tasks/bundler.rake 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/tasks/bundler.rake 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,6 @@ +# frozen_string_literal: true +begin + require 'bundler/gem_tasks' +rescue LoadError + warn $!.message +end diff -Nru asciidoctor-1.5.5/tasks/console.rake asciidoctor-2.0.10/tasks/console.rake --- asciidoctor-1.5.5/tasks/console.rake 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/tasks/console.rake 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ +# frozen_string_literal: true +desc 'Open an irb session preloaded with this library' +task :console do + sh 'bundle console', verbose: false +end diff -Nru asciidoctor-1.5.5/tasks/coverage.rake asciidoctor-2.0.10/tasks/coverage.rake --- asciidoctor-1.5.5/tasks/coverage.rake 1970-01-01 
00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/tasks/coverage.rake 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ +# frozen_string_literal: true +desc 'Activates coverage' +task :coverage do + ENV['COVERAGE'] = 'true' +end diff -Nru asciidoctor-1.5.5/tasks/cucumber.rake asciidoctor-2.0.10/tasks/cucumber.rake --- asciidoctor-1.5.5/tasks/cucumber.rake 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/tasks/cucumber.rake 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,10 @@ +# frozen_string_literal: true +begin + require 'cucumber/rake/task' + Cucumber::Rake::Task.new :features do |t| + t.cucumber_opts = %w(-f progress) + t.cucumber_opts << '--no-color' if ENV['CI'] + end +rescue LoadError + warn $!.message +end diff -Nru asciidoctor-1.5.5/tasks/dependents.rake asciidoctor-2.0.10/tasks/dependents.rake --- asciidoctor-1.5.5/tasks/dependents.rake 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/tasks/dependents.rake 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,56 @@ +# frozen_string_literal: true +namespace :build do + desc 'Trigger builds for all dependent projects on Travis CI' + task :dependents do + if ENV['TRAVIS'].to_s == 'true' + next unless ENV['TRAVIS_PULL_REQUEST'].to_s == 'false' && + ENV['TRAVIS_TAG'].to_s.empty? && + (ENV['TRAVIS_JOB_NUMBER'].to_s.end_with? '.1') + end + # NOTE The TRAVIS_TOKEN env var must be defined in Travis interface. + # Retrieve this token using the `travis token` command. + # The GitHub user corresponding to the Travis user must have write access to the repository. + # After granting permission, sign into Travis and resync the repositories. + next unless (token = ENV['TRAVIS_TOKEN']) + require 'json' + require 'net/http' + require 'open-uri' + require 'yaml' + %w( + asciidoctor/asciidoctor.js + asciidoctor/asciidoctorj + asciidoctor/asciidoctor-diagram + asciidoctor/asciidoctor-reveal.js + ).each do |project| + org, name, branch = project.split '/', 3 + branch ||= 'master' + project = [org, name, branch] * '/' + header = { + 'Content-Type' => 'application/json', + 'Accept' => 'application/json', + 'Travis-API-Version' => '3', + 'Authorization' => %(token #{token}) + } + if (commit_hash = ENV['TRAVIS_COMMIT']) + commit_memo = %( (#{commit_hash.slice 0, 8})\n\nhttps://github.com/#{ENV['TRAVIS_REPO_SLUG'] || 'asciidoctor/asciidoctor'}/commit/#{commit_hash}) + end + config = YAML.load open(%(https://raw.githubusercontent.com/#{project}/.travis-upstream-only.yml)) {|fd| fd.read } rescue {} + payload = { + 'request' => { + 'branch' => branch, + 'message' => %(Build triggered by Asciidoctor#{commit_memo}), + 'config' => config + } + }.to_json + (http = Net::HTTP.new 'api.travis-ci.org', 443).use_ssl = true + request = Net::HTTP::Post.new %(/repo/#{org}%2F#{name}/requests), header + request.body = payload + response = http.request request + if response.code == '202' + puts %(Successfully triggered build on #{project} repository) + else + warn %(Unable to trigger build on #{project} repository: #{response.code} - #{response.message}) + end + end + end +end diff -Nru asciidoctor-1.5.5/tasks/test.rake asciidoctor-2.0.10/tasks/test.rake --- asciidoctor-1.5.5/tasks/test.rake 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/tasks/test.rake 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,30 @@ +# frozen_string_literal: true +def prepare_test_env + # rather than hardcoding gc settings in test task, + # could use https://gist.github.com/benders/788695 + ENV['RUBY_GC_MALLOC_LIMIT'] = 128_000_000.to_s + ENV['RUBY_GC_OLDMALLOC_LIMIT'] = 
128_000_000.to_s + ENV['RUBY_GC_HEAP_INIT_SLOTS'] = 750_000.to_s + ENV['RUBY_GC_HEAP_FREE_SLOTS'] = 750_000.to_s + ENV['RUBY_GC_HEAP_GROWTH_MAX_SLOTS'] = 50_000.to_s + ENV['RUBY_GC_HEAP_GROWTH_FACTOR'] = 2.to_s +end + +begin + require 'rake/testtask' + Rake::TestTask.new :test do |t| + prepare_test_env + puts %(LANG: #{ENV['LANG']}) if ENV.key? 'TRAVIS_BUILD_ID' + t.libs << 'test' + t.pattern = 'test/**/*_test.rb' + t.verbose = true + t.warning = true + end +rescue LoadError + warn $!.message +end + +namespace :test do + desc 'Run unit and feature tests' + task all: [:test, :features] +end diff -Nru asciidoctor-1.5.5/test/api_test.rb asciidoctor-2.0.10/test/api_test.rb --- asciidoctor-1.5.5/test/api_test.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/api_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,1715 @@ +# frozen_string_literal: true +require_relative 'test_helper' + +context 'API' do + context 'Load' do + test 'should load input file' do + sample_input_path = fixture_path('sample.adoc') + doc = File.open(sample_input_path, Asciidoctor::FILE_READ_MODE) {|file| Asciidoctor.load file, safe: Asciidoctor::SafeMode::SAFE } + assert_equal 'Document Title', doc.doctitle + assert_equal File.expand_path(sample_input_path), doc.attr('docfile') + assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') + assert_equal '.adoc', doc.attr('docfilesuffix') + end + + test 'should load input file from filename' do + sample_input_path = fixture_path('sample.adoc') + doc = Asciidoctor.load_file(sample_input_path, safe: Asciidoctor::SafeMode::SAFE) + assert_equal 'Document Title', doc.doctitle + assert_equal File.expand_path(sample_input_path), doc.attr('docfile') + assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') + assert_equal '.adoc', doc.attr('docfilesuffix') + end + + test 'should load input file from pathname' do + sample_input_path = Pathname fixture_path 'sample.adoc' + doc = Asciidoctor.load_file sample_input_path, safe: :safe + assert_equal 'Document Title', doc.doctitle + assert_equal sample_input_path.expand_path.to_s, (doc.attr 'docfile') + assert_equal sample_input_path.expand_path.dirname.to_s, (doc.attr 'docdir') + assert_equal '.adoc', (doc.attr 'docfilesuffix') + end + + test 'should load input file with alternate file extension' do + sample_input_path = fixture_path 'sample-alt-extension.asciidoc' + doc = Asciidoctor.load_file sample_input_path, safe: :safe + assert_equal 'Document Title', doc.doctitle + assert_equal File.expand_path(sample_input_path), doc.attr('docfile') + assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') + assert_equal '.asciidoc', doc.attr('docfilesuffix') + end + + test 'should coerce encoding of file to UTF-8' do + old_external = Encoding.default_external + old_internal = Encoding.default_internal + old_verbose = $VERBOSE + begin + $VERBOSE = nil # disable warnings since we have to modify constants + input_path = fixture_path 'encoding.adoc' + Encoding.default_external = Encoding.default_internal = Encoding::IBM437 + output = Asciidoctor.convert_file input_path, to_file: false, safe: :safe + assert_equal Encoding::UTF_8, output.encoding + assert_include 'Romé', output + ensure + Encoding.default_external = old_external + Encoding.default_internal = old_internal + $VERBOSE = old_verbose + end + end + + test 'should not load file with unrecognized encoding' do + begin + tmp_input = Tempfile.new %w(test- .adoc), encoding: Encoding::IBM437 + # NOTE 
using a character whose code differs between UTF-8 and IBM437 + tmp_input.write %(ƒ\n) + tmp_input.close + exception = assert_raises ArgumentError do + Asciidoctor.load_file tmp_input.path, safe: :safe + end + expected_message = 'Failed to load AsciiDoc document - source is either binary or contains invalid Unicode data' + assert_include expected_message, exception.message + ensure + tmp_input.close! + end + end + + test 'should not load invalid file' do + sample_input_path = fixture_path('hello-asciidoctor.pdf') + exception = assert_raises ArgumentError do + Asciidoctor.load_file(sample_input_path, safe: Asciidoctor::SafeMode::SAFE) + end + expected_message = 'Failed to load AsciiDoc document - source is either binary or contains invalid Unicode data' + assert_include expected_message, exception.message + # verify we have the correct backtrace (should be at least in the first 5 lines) + assert_match(/reader\.rb.*prepare_lines/, exception.backtrace[0..4].join(?\n)) + end + + test 'should convert filename that contains non-ASCII characters independent of default encodings' do + old_external = Encoding.default_external + old_internal = Encoding.default_internal + old_verbose = $VERBOSE + begin + $VERBOSE = nil # disable warnings since we have to modify constants + tmp_input = Tempfile.new %w(test-UTF8- .adoc) + tmp_input.write %(UTF8\n) + tmp_input.close + Encoding.default_external = Encoding.default_internal = Encoding::IBM437 + tmp_output = tmp_input.path.sub '.adoc', '.html' + Asciidoctor.convert_file tmp_input.path, safe: :safe, attributes: 'linkcss !copycss' + assert File.exist? tmp_output + output = File.binread tmp_output + refute_empty output + # force encoding to UTF-8 and we should see that the string is in fact UTF-8 encoded + output = String.new output, encoding: Encoding::UTF_8 + assert_equal Encoding::UTF_8, output.encoding + assert_include 'UTF8', output + ensure + tmp_input.close! 
+ FileUtils.rm_f tmp_output + Encoding.default_external = old_external + Encoding.default_internal = old_internal + $VERBOSE = old_verbose + end + end + + test 'should load input IO' do + input = StringIO.new <<~'EOS' + Document Title + ============== + + preamble + EOS + doc = Asciidoctor.load(input, safe: Asciidoctor::SafeMode::SAFE) + assert_equal 'Document Title', doc.doctitle + refute doc.attr?('docfile') + assert_equal doc.base_dir, doc.attr('docdir') + end + + test 'should load input string' do + input = <<~'EOS' + Document Title + ============== + + preamble + EOS + doc = Asciidoctor.load(input, safe: Asciidoctor::SafeMode::SAFE) + assert_equal 'Document Title', doc.doctitle + refute doc.attr?('docfile') + assert_equal doc.base_dir, doc.attr('docdir') + end + + test 'should load input string array' do + input = <<~'EOS' + Document Title + ============== + + preamble + EOS + doc = Asciidoctor.load(input.lines, safe: Asciidoctor::SafeMode::SAFE) + assert_equal 'Document Title', doc.doctitle + refute doc.attr?('docfile') + assert_equal doc.base_dir, doc.attr('docdir') + end + + test 'should load nil input' do + doc = Asciidoctor.load nil, safe: :safe + refute_nil doc + assert_empty doc.blocks + end + + test 'should accept attributes as array' do + # NOTE there's a tab character before idseparator + doc = Asciidoctor.load('text', attributes: %w(toc sectnums source-highlighter=coderay idprefix idseparator=-)) + assert_kind_of Hash, doc.attributes + assert doc.attr?('toc') + assert_equal '', doc.attr('toc') + assert doc.attr?('sectnums') + assert_equal '', doc.attr('sectnums') + assert doc.attr?('source-highlighter') + assert_equal 'coderay', doc.attr('source-highlighter') + assert doc.attr?('idprefix') + assert_equal '', doc.attr('idprefix') + assert doc.attr?('idseparator') + assert_equal '-', doc.attr('idseparator') + end + + test 'should accept attributes as empty array' do + doc = Asciidoctor.load('text', attributes: []) + assert_kind_of Hash, doc.attributes + end + + test 'should accept attributes as string' do + doc = Asciidoctor.load 'text', attributes: %(toc sectnums\nsource-highlighter=coderay\nidprefix\nidseparator=-) + assert_kind_of Hash, doc.attributes + assert doc.attr?('toc') + assert_equal '', doc.attr('toc') + assert doc.attr?('sectnums') + assert_equal '', doc.attr('sectnums') + assert doc.attr?('source-highlighter') + assert_equal 'coderay', doc.attr('source-highlighter') + assert doc.attr?('idprefix') + assert_equal '', doc.attr('idprefix') + assert doc.attr?('idseparator') + assert_equal '-', doc.attr('idseparator') + end + + test 'should accept values containing spaces in attributes string' do + doc = Asciidoctor.load('text', attributes: %(idprefix idseparator=- note-caption=Note\\ to\\\tself toc)) + assert_kind_of Hash, doc.attributes + assert doc.attr?('idprefix') + assert_equal '', doc.attr('idprefix') + assert doc.attr?('idseparator') + assert_equal '-', doc.attr('idseparator') + assert doc.attr?('note-caption') + assert_equal "Note to\tself", doc.attr('note-caption') + end + + test 'should accept attributes as empty string' do + doc = Asciidoctor.load('text', attributes: '') + assert_kind_of Hash, doc.attributes + end + + test 'should accept attributes as nil' do + doc = Asciidoctor.load('text', attributes: nil) + assert_kind_of Hash, doc.attributes + end + + test 'should accept attributes if hash like' do + class Hashish + def initialize + @table = { 'toc' => '' } + end + + def keys + @table.keys + end + + def [](key) + @table[key] + end + end + + doc = 
Asciidoctor.load('text', attributes: Hashish.new) + assert_kind_of Hash, doc.attributes + assert doc.attributes.has_key?('toc') + end + + test 'should not expand value of docdir attribute if specified via API' do + docdir = 'virtual/directory' + doc = document_from_string '', safe: :safe, attributes: { 'docdir' => docdir } + assert_equal docdir, (doc.attr 'docdir') + assert_equal docdir, doc.base_dir + end + + test 'converts block to output format when convert is called' do + doc = Asciidoctor.load 'paragraph text' + expected = <<~'EOS'.chop +
+      <div class="paragraph">
+      <p>paragraph text</p>
+      </div>
    + EOS + assert_equal 1, doc.blocks.length + assert_equal :paragraph, doc.blocks[0].context + assert_equal expected, doc.blocks[0].convert + end + + test 'render method on node is aliased to convert method' do + input = <<~'EOS' + paragraph text + + * list item + EOS + doc = Asciidoctor.load input + assert_equal 2, doc.blocks.length + ([doc] + doc.blocks).each do |block| + assert_equal block.method(:convert), block.method(:render) + end + inline = Asciidoctor::Inline.new doc.blocks[0], :image, nil, type: 'image', target: 'tiger.png' + assert_equal inline.method(:convert), inline.method(:render) + end + + test 'should output timestamps by default' do + doc = document_from_string 'text', backend: :html5, attributes: nil + result = doc.convert + assert doc.attr?('docdate') + refute doc.attr? 'reproducible' + assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 1 + end + + test 'should not output timestamps if reproducible attribute is set in HTML 5' do + doc = document_from_string 'text', backend: :html5, attributes: { 'reproducible' => '' } + result = doc.convert + assert doc.attr?('docdate') + assert doc.attr?('reproducible') + assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 0 + end + + test 'should not output timestamps if reproducible attribute is set in DocBook' do + doc = document_from_string 'text', backend: :docbook, attributes: { 'reproducible' => '' } + result = doc.convert + assert doc.attr?('docdate') + assert doc.attr?('reproducible') + assert_xpath '/article/info/date', result, 0 + end + + test 'should not modify options argument' do + options = { safe: Asciidoctor::SafeMode::SAFE } + options.freeze + sample_input_path = fixture_path('sample.adoc') + begin + Asciidoctor.load_file sample_input_path, options + rescue + flunk %(options argument should not be modified) + end + end + + test 'should not modify attributes Hash argument' do + attributes = {} + attributes.freeze + options = { + safe: Asciidoctor::SafeMode::SAFE, + attributes: attributes, + } + sample_input_path = fixture_path('sample.adoc') + begin + Asciidoctor.load_file sample_input_path, options + rescue + flunk %(attributes argument should not be modified) + end + end + + test 'should be able to restore header attributes after call to convert' do + input = <<~'EOS' + = Document Title + :foo: bar + + content + + :foo: baz + + content + EOS + doc = Asciidoctor.load input + assert_equal 'bar', (doc.attr 'foo') + doc.convert + assert_equal 'baz', (doc.attr 'foo') + doc.restore_attributes + assert_equal 'bar', (doc.attr 'foo') + end + + test 'should track file and line information with blocks if sourcemap option is set' do + doc = Asciidoctor.load_file fixture_path('sample.adoc'), sourcemap: true + + refute_nil doc.source_location + assert_equal 'sample.adoc', doc.file + assert_equal 1, doc.lineno + + section_1 = doc.sections[0] + assert_equal 'Section A', section_1.title + refute_nil section_1.source_location + assert_equal 'sample.adoc', section_1.file + assert_equal 10, section_1.lineno + + section_2 = doc.sections[1] + assert_equal 'Section B', section_2.title + refute_nil section_2.source_location + assert_equal 'sample.adoc', section_2.file + assert_equal 18, section_2.lineno + + table_block = section_2.blocks[1] + assert_equal :table, table_block.context + refute_nil table_block.source_location + assert_equal 'sample.adoc', table_block.file + assert_equal 22, table_block.lineno + first_cell = table_block.rows.body[0][0] 
+ refute_nil first_cell.source_location + assert_equal 'sample.adoc', first_cell.file + assert_equal 23, first_cell.lineno + second_cell = table_block.rows.body[0][1] + refute_nil second_cell.source_location + assert_equal 'sample.adoc', second_cell.file + assert_equal 23, second_cell.lineno + last_cell = table_block.rows.body[-1][-1] + refute_nil last_cell.source_location + assert_equal 'sample.adoc', last_cell.file + assert_equal 24, last_cell.lineno + + last_block = section_2.blocks[-1] + assert_equal :ulist, last_block.context + refute_nil last_block.source_location + assert_equal 'sample.adoc', last_block.file + assert_equal 28, last_block.lineno + + list_items = last_block.blocks + refute_nil list_items[0].source_location + assert_equal 'sample.adoc', list_items[0].file + assert_equal 28, list_items[0].lineno + + refute_nil list_items[1].source_location + assert_equal 'sample.adoc', list_items[1].file + assert_equal 29, list_items[1].lineno + + refute_nil list_items[2].source_location + assert_equal 'sample.adoc', list_items[2].file + assert_equal 30, list_items[2].lineno + + doc = Asciidoctor.load_file fixture_path('master.adoc'), sourcemap: true, safe: :safe + + section_1 = doc.sections[0] + assert_equal 'Chapter A', section_1.title + refute_nil section_1.source_location + assert_equal fixture_path('chapter-a.adoc'), section_1.file + assert_equal 1, section_1.lineno + end + + test 'should track file and line information on list items if sourcemap option is set' do + doc = Asciidoctor.load_file fixture_path('lists.adoc'), sourcemap: true + + first_section = doc.blocks[1] + + unordered_basic_list = first_section.blocks[0] + assert_equal 11, unordered_basic_list.lineno + + unordered_basic_list_items = unordered_basic_list.find_by context: :list_item + assert_equal 11, unordered_basic_list_items[0].lineno + assert_equal 12, unordered_basic_list_items[1].lineno + assert_equal 13, unordered_basic_list_items[2].lineno + + unordered_max_nesting = first_section.blocks[1] + assert_equal 16, unordered_max_nesting.lineno + unordered_max_nesting_items = unordered_max_nesting.find_by context: :list_item + assert_equal 16, unordered_max_nesting_items[0].lineno + assert_equal 17, unordered_max_nesting_items[1].lineno + assert_equal 18, unordered_max_nesting_items[2].lineno + assert_equal 19, unordered_max_nesting_items[3].lineno + assert_equal 20, unordered_max_nesting_items[4].lineno + assert_equal 21, unordered_max_nesting_items[5].lineno + + checklist = first_section.blocks[2] + assert_equal 24, checklist.lineno + checklist_list_items = checklist.find_by context: :list_item + assert_equal 24, checklist_list_items[0].lineno + assert_equal 25, checklist_list_items[1].lineno + assert_equal 26, checklist_list_items[2].lineno + assert_equal 27, checklist_list_items[3].lineno + + ordered_basic = first_section.blocks[3] + assert_equal 30, ordered_basic.lineno + ordered_basic_list_items = ordered_basic.find_by context: :list_item + assert_equal 30, ordered_basic_list_items[0].lineno + assert_equal 31, ordered_basic_list_items[1].lineno + assert_equal 32, ordered_basic_list_items[2].lineno + + ordered_nested = first_section.blocks[4] + assert_equal 35, ordered_nested.lineno + ordered_nested_list_items = ordered_nested.find_by context: :list_item + assert_equal 35, ordered_nested_list_items[0].lineno + assert_equal 36, ordered_nested_list_items[1].lineno + assert_equal 37, ordered_nested_list_items[2].lineno + assert_equal 38, ordered_nested_list_items[3].lineno + assert_equal 39, 
ordered_nested_list_items[4].lineno + + ordered_max_nesting = first_section.blocks[5] + assert_equal 42, ordered_max_nesting.lineno + ordered_max_nesting_items = ordered_max_nesting.find_by context: :list_item + assert_equal 42, ordered_max_nesting_items[0].lineno + assert_equal 43, ordered_max_nesting_items[1].lineno + assert_equal 44, ordered_max_nesting_items[2].lineno + assert_equal 45, ordered_max_nesting_items[3].lineno + assert_equal 46, ordered_max_nesting_items[4].lineno + assert_equal 47, ordered_max_nesting_items[5].lineno + + labeled_singleline = first_section.blocks[6] + assert_equal 50, labeled_singleline.lineno + labeled_singleline_items = labeled_singleline.find_by context: :list_item + assert_equal 50, labeled_singleline_items[0].lineno + assert_equal 50, labeled_singleline_items[1].lineno + assert_equal 51, labeled_singleline_items[2].lineno + assert_equal 51, labeled_singleline_items[3].lineno + + labeled_multiline = first_section.blocks[7] + assert_equal 54, labeled_multiline.lineno + labeled_multiline_items = labeled_multiline.find_by context: :list_item + assert_equal 54, labeled_multiline_items[0].lineno + assert_equal 55, labeled_multiline_items[1].lineno + assert_equal 56, labeled_multiline_items[2].lineno + assert_equal 57, labeled_multiline_items[3].lineno + + qanda = first_section.blocks[8] + assert_equal 61, qanda.lineno + qanda_items = qanda.find_by context: :list_item + assert_equal 61, qanda_items[0].lineno + assert_equal 62, qanda_items[1].lineno + assert_equal 63, qanda_items[2].lineno + assert_equal 63, qanda_items[3].lineno + + mixed = first_section.blocks[9] + assert_equal 66, mixed.lineno + mixed_items = mixed.find_by(context: :list_item) {|block| block.text? } + assert_equal 66, mixed_items[0].lineno + assert_equal 67, mixed_items[1].lineno + assert_equal 68, mixed_items[2].lineno + assert_equal 69, mixed_items[3].lineno + assert_equal 70, mixed_items[4].lineno + assert_equal 71, mixed_items[5].lineno + assert_equal 72, mixed_items[6].lineno + assert_equal 73, mixed_items[7].lineno + assert_equal 74, mixed_items[8].lineno + assert_equal 75, mixed_items[9].lineno + assert_equal 77, mixed_items[10].lineno + assert_equal 78, mixed_items[11].lineno + assert_equal 79, mixed_items[12].lineno + assert_equal 80, mixed_items[13].lineno + assert_equal 81, mixed_items[14].lineno + assert_equal 82, mixed_items[15].lineno + assert_equal 83, mixed_items[16].lineno + + unordered_complex_list = first_section.blocks[10] + assert_equal 86, unordered_complex_list.lineno + unordered_complex_items = unordered_complex_list.find_by context: :list_item + assert_equal 86, unordered_complex_items[0].lineno + assert_equal 87, unordered_complex_items[1].lineno + assert_equal 88, unordered_complex_items[2].lineno + assert_equal 92, unordered_complex_items[3].lineno + assert_equal 96, unordered_complex_items[4].lineno + end + + # NOTE this does not work for a list continuation that attached to a grandparent + test 'should assign correct source location to blocks that follow a detached list continuation' do + input = <<~'EOS' + * parent + ** child + + + + paragraph attached to parent + + **** + sidebar outside list + **** + EOS + + doc = document_from_string input, sourcemap: true + assert_equal [5, 8], (doc.find_by context: :paragraph).map(&:lineno) + end + + test 'should assign correct source location if section occurs on last line of input' do + input = <<~'EOS' + = Document Title + + == Section A + + content + + == Section B + EOS + + doc = document_from_string input, 
sourcemap: true + assert_equal [1, 3, 7], (doc.find_by context: :section).map(&:lineno) + end + + test 'should allow sourcemap option on document to be modified before document is parsed' do + doc = Asciidoctor.load_file fixture_path('sample.adoc'), parse: false + doc.sourcemap = true + refute doc.parsed? + doc = doc.parse + assert doc.parsed? + + section_1 = doc.sections[0] + assert_equal 'Section A', section_1.title + refute_nil section_1.source_location + assert_equal 'sample.adoc', section_1.file + assert_equal 10, section_1.lineno + end + + test 'find_by should return Array of blocks anywhere in document tree that match criteria' do + input = <<~'EOS' + = Document Title + + preamble + + == Section A + + paragraph + + -- + Exhibit A:: + + + [#tiger.animal] + image::tiger.png[Tiger] + -- + + image::shoe.png[Shoe] + + == Section B + + paragraph + EOS + + doc = Asciidoctor.load input + result = doc.find_by context: :image + assert_equal 2, result.size + assert_equal :image, result[0].context + assert_equal 'tiger.png', result[0].attr('target') + assert_equal :image, result[1].context + assert_equal 'shoe.png', result[1].attr('target') + end + + test 'find_by should return an empty Array if no matches are found' do + input = 'paragraph' + doc = Asciidoctor.load input + result = doc.find_by context: :section + refute_nil result + assert_equal 0, result.size + end + + test 'find_by should discover blocks inside AsciiDoc table cells if traverse_documents selector option is true' do + input = <<~'EOS' + paragraph in parent document (before) + + [%footer,cols=2*] + |=== + a| + paragraph in nested document (body) + |normal table cell + + a| + paragraph in nested document (foot) + |normal table cell + |=== + + paragraph in parent document (after) + EOS + + doc = Asciidoctor.load input + result = doc.find_by context: :paragraph + assert_equal 2, result.size + result = doc.find_by context: :paragraph, traverse_documents: true + assert_equal 4, result.size + end + + test 'find_by should return inner document of AsciiDoc table cell if traverse_documents selector option is true' do + input = <<~'EOS' + |=== + a|paragraph in nested document + |=== + EOS + + doc = Asciidoctor.load input + inner_doc = doc.blocks[0].rows.body[0][0].inner_document + result = doc.find_by traverse_documents: true + assert_include inner_doc, result + result = doc.find_by context: :inner_document, traverse_documents: true + assert_equal 1, result.size + assert_equal inner_doc, result[0] + end + + test 'find_by should match table cells' do + input = <<~'EOS' + |=== + |a |b |c + + |1 + one + a|NOTE: 2, as it goes. 
+ l| + 3 + you + me + |=== + EOS + + doc = document_from_string input + table = doc.blocks[0] + first_head_cell = table.rows.head[0][0] + first_body_cell = table.rows.body[0][0] + result = doc.find_by + assert_include first_head_cell, result + assert_include first_body_cell, result + assert_equal 'a', first_head_cell.source + assert_equal ['a'], first_head_cell.lines + assert_equal %(1\none), first_body_cell.source + assert_equal ['1', 'one'], first_body_cell.lines + result = doc.find_by context: :table_cell, style: :asciidoc + assert_equal 1, result.size + assert_kind_of Asciidoctor::Table::Cell, result[0] + assert_equal :asciidoc, result[0].style + assert_equal 'NOTE: 2, as it goes.', result[0].source + end + + test 'find_by should return Array of blocks that match style criteria' do + input = <<~'EOS' + [square] + * one + * two + * three + + --- + + * apples + * bananas + * pears + EOS + + doc = Asciidoctor.load input + result = doc.find_by context: :ulist, style: 'square' + assert_equal 1, result.size + assert_equal :ulist, result[0].context + end + + test 'find_by should return Array of blocks that match role criteria' do + input = <<~'EOS' + [#tiger.animal] + image::tiger.png[Tiger] + + image::shoe.png[Shoe] + EOS + + doc = Asciidoctor.load input + result = doc.find_by context: :image, role: 'animal' + assert_equal 1, result.size + assert_equal :image, result[0].context + assert_equal 'tiger.png', result[0].attr('target') + end + + test 'find_by should return the document title section if context selector is :section' do + input = <<~'EOS' + = Document Title + + preamble + + == Section One + + content + EOS + doc = Asciidoctor.load input + result = doc.find_by context: :section + refute_nil result + assert_equal 2, result.size + assert_equal :section, result[0].context + assert_equal 'Document Title', result[0].title + end + + test 'find_by should only return results for which the block argument yields true' do + input = <<~'EOS' + == Section + + content + + === Subsection + + content + EOS + doc = Asciidoctor.load input + result = doc.find_by(context: :section) {|sect| sect.level == 1 } + refute_nil result + assert_equal 1, result.size + assert_equal :section, result[0].context + assert_equal 'Section', result[0].title + end + + test 'find_by should reject node and its children if block returns :reject' do + input = <<~'EOS' + paragraph 1 + + ==== + paragraph 2 + + term:: + + + paragraph 3 + ==== + + paragraph 4 + EOS + doc = Asciidoctor.load input + result = doc.find_by do |candidate| + ctx = candidate.context + if ctx == :example + :reject + elsif ctx == :paragraph + true + end + end + refute_nil result + assert_equal 2, result.size + assert_equal :paragraph, result[0].context + assert_equal :paragraph, result[1].context + end + + test 'find_by should reject node matched by ID selector if block returns :reject' do + input = <<~'EOS' + [.rolename] + paragraph 1 + + [.rolename#idname] + paragraph 2 + EOS + doc = Asciidoctor.load input + result = doc.find_by id: 'idname', role: 'rolename' + refute_nil result + assert_equal 1, result.size + assert_equal doc.blocks[1], result[0] + result = doc.find_by(id: 'idname', role: 'rolename') { :reject } + refute_nil result + assert_equal 0, result.size + end + + test 'find_by should accept node matched by ID selector if block returns :prune' do + input = <<~'EOS' + [.rolename] + paragraph 1 + + [.rolename#idname] + ==== + paragraph 2 + ==== + EOS + doc = Asciidoctor.load input + result = doc.find_by id: 'idname', role: 'rolename' + refute_nil 
result + assert_equal 1, result.size + assert_equal doc.blocks[1], result[0] + result = doc.find_by(id: 'idname', role: 'rolename') { :prune } + refute_nil result + assert_equal 1, result.size + assert_equal doc.blocks[1], result[0] + end + + test 'find_by should accept node but reject its children if block returns :prune' do + input = <<~'EOS' + ==== + paragraph 2 + + term:: + + + paragraph 3 + ==== + EOS + doc = Asciidoctor.load input + result = doc.find_by do |candidate| + if candidate.context == :example + :prune + end + end + refute_nil result + assert_equal 1, result.size + assert_equal :example, result[0].context + end + + test 'find_by should stop looking for blocks when StopIteration is raised' do + input = <<~'EOS' + paragraph 1 + + ==== + paragraph 2 + + **** + paragraph 3 + **** + ==== + + paragraph 4 + + * item + + + paragraph 5 + EOS + doc = Asciidoctor.load input + + stop_at_next = false + result = doc.find_by do |candidate| + raise StopIteration if stop_at_next + if candidate.context == :paragraph + candidate.parent.context == :sidebar ? (stop_at_next = true) : true + end + end + refute_nil result + assert_equal 3, result.size + assert_equal 'paragraph 1', result[0].content + assert_equal 'paragraph 2', result[1].content + assert_equal 'paragraph 3', result[2].content + end + + test 'find_by should stop looking for blocks when filter block returns :stop directive' do + input = <<~'EOS' + paragraph 1 + + ==== + paragraph 2 + + **** + paragraph 3 + **** + ==== + + paragraph 4 + + * item + + + paragraph 5 + EOS + doc = Asciidoctor.load input + + stop_at_next = false + result = doc.find_by do |candidate| + next :stop if stop_at_next + if candidate.context == :paragraph + candidate.parent.context == :sidebar ? (stop_at_next = true) : true + end + end + refute_nil result + assert_equal 3, result.size + assert_equal 'paragraph 1', result[0].content + assert_equal 'paragraph 2', result[1].content + assert_equal 'paragraph 3', result[2].content + end + + test 'find_by should only return one result when matching by id' do + input = <<~'EOS' + == Section + + content + + [#subsection] + === Subsection + + content + EOS + doc = Asciidoctor.load input + result = doc.find_by(context: :section, id: 'subsection') + refute_nil result + assert_equal 1, result.size + assert_equal :section, result[0].context + assert_equal 'Subsection', result[0].title + end + + test 'find_by should stop seeking once match is found' do + input = <<~'EOS' + == Section + + content + + [#subsection] + === Subsection + + [#last] + content + EOS + doc = Asciidoctor.load input + visited_last = false + result = doc.find_by(id: 'subsection') do |candidate| + visited_last = true if candidate.id == 'last' + true + end + refute_nil result + assert_equal 1, result.size + refute visited_last + end + + test 'find_by should return an empty Array if the id criteria matches but the block argument yields false' do + input = <<~'EOS' + == Section + + content + + [#subsection] + === Subsection + + content + EOS + doc = Asciidoctor.load input + result = doc.find_by(context: :section, id: 'subsection') {|sect| false } + refute_nil result + assert_equal 0, result.size + end + + test 'find_by should not crash if dlist entry does not have description' do + input = 'term without description::' + doc = Asciidoctor.load input + result = doc.find_by + refute_nil result + assert_equal 3, result.size + assert_kind_of Asciidoctor::Document, result[0] + assert_kind_of Asciidoctor::List, result[1] + assert_kind_of Asciidoctor::ListItem, 
result[2] + end + + test 'dlist item should always have two entries for terms and desc' do + [ + 'term w/o desc::', + %(term::\nalias::), + %(primary:: 1\nsecondary:: 2), + ].each do |input| + dlist = (Asciidoctor.load input).blocks[0] + dlist.items.each do |item| + assert_equal 2, item.size + assert_kind_of ::Array, item[0] + assert_kind_of Asciidoctor::ListItem, item[1] if item[1] + end + end + end + + test 'timings are recorded for each step when load and convert are called separately' do + sample_input_path = fixture_path 'asciidoc_index.txt' + (Asciidoctor.load_file sample_input_path, timings: (timings = Asciidoctor::Timings.new)).convert + refute_equal '0.00000', '%05.5f' % timings.read_parse.to_f + refute_equal '0.00000', '%05.5f' % timings.convert.to_f + refute_equal timings.read_parse, timings.total + end + + test 'can disable syntax highlighter by setting value to nil in :syntax_highlighters option' do + doc = Asciidoctor.load '', safe: :safe, syntax_highlighters: { 'coderay' => nil }, attributes: { 'source-highlighter' => 'coderay' } + assert_nil doc.syntax_highlighter + end + + test 'can substitute a custom syntax highlighter factory instance using the :syntax_highlighter_factory option' do + input = <<~'EOS' + [source,ruby] + ---- + puts 'Hello, World!' + ---- + EOS + # NOTE this tests both the lazy loading and the custom factory + syntax_hl_factory = Asciidoctor::SyntaxHighlighter::CustomFactory.new 'github' => (Asciidoctor::SyntaxHighlighter.for 'html-pipeline') + doc = Asciidoctor.load input, safe: :safe, syntax_highlighter_factory: syntax_hl_factory, attributes: { 'source-highlighter' => 'github' } + refute_nil doc.syntax_highlighter + assert_kind_of Asciidoctor::SyntaxHighlighter::HtmlPipelineAdapter, doc.syntax_highlighter + assert_include '
<pre lang="ruby"><code>', doc.convert
    +    end
    +
    +    test 'can substitute an extended syntax highlighter factory implementation using the :syntax_highlighters option' do
    +      input = <<~'EOS'
    +      [source,ruby]
    +      ----
    +      puts 'Hello, World!'
    +      ----
    +      EOS
    +      syntax_hl_factory_class = Class.new do
    +        include Asciidoctor::SyntaxHighlighter::DefaultFactory
    +
    +        def for name
    +          super 'highlight.js'
    +        end
    +      end
    +      doc = Asciidoctor.load input, safe: :safe, syntax_highlighter_factory: syntax_hl_factory_class.new, attributes: { 'source-highlighter' => 'coderay' }
    +      refute_nil doc.syntax_highlighter
    +      output = doc.convert
    +      refute_include 'CodeRay', output
    +      assert_include 'hljs', output
    +    end
    +  end
    +
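The two factory tests above exercise the :syntax_highlighter_factory and :syntax_highlighters options introduced in 2.0. A minimal usage sketch based on those same options — the 'pygments' => rouge mapping below is illustrative only, not something this diff sets up:

  require 'asciidoctor'

  # Route the name requested via source-highlighter to a different bundled
  # adapter using the CustomFactory exercised in the tests above.
  factory = Asciidoctor::SyntaxHighlighter::CustomFactory.new(
    'pygments' => (Asciidoctor::SyntaxHighlighter.for 'rouge') # hypothetical mapping
  )

  doc = Asciidoctor.load <<~'ADOC', safe: :safe, syntax_highlighter_factory: factory, attributes: { 'source-highlighter' => 'pygments' }
  [source,ruby]
  ----
  puts 'Hello, World!'
  ----
  ADOC
  doc.convert # the source block is highlighted by the rouge adapter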
    +  context 'Convert' do
    +    test 'render_file is aliased to convert_file' do
    +      assert_equal Asciidoctor.method(:convert_file), Asciidoctor.method(:render_file)
    +    end
    +
    +    test 'render is aliased to convert' do
    +      assert_equal Asciidoctor.method(:convert), Asciidoctor.method(:render)
    +    end
    +
    +    test 'should convert source document to embedded document when header_footer is false' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      sample_output_path = fixture_path('sample.html')
    +
    +      [{ header_footer: false }, { header_footer: false, to_file: sample_output_path }].each do |opts|
    +        begin
    +          Asciidoctor.convert_file sample_input_path, opts
    +          assert File.exist?(sample_output_path)
    +          output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
    +          refute_empty output
    +          assert_xpath '/html', output, 0
    +          assert_css '#preamble', output, 1
    +        ensure
    +          FileUtils.rm(sample_output_path)
    +        end
    +      end
    +    end
    +
    +    test 'should convert source document to standalone document string when to_file is false and standalone is true' do
    +      sample_input_path = fixture_path('sample.adoc')
    +
    +      output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false
    +      refute_empty output
    +      assert_xpath '/html', output, 1
    +      assert_xpath '/html/head', output, 1
    +      assert_xpath '/html/body', output, 1
    +      assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
    +      assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
    +    end
    +
    +    test 'should convert source document to standalone document string when to_file is false and header_footer is true' do
    +      sample_input_path = fixture_path('sample.adoc')
    +
    +      output = Asciidoctor.convert_file sample_input_path, header_footer: true, to_file: false
    +      refute_empty output
    +      assert_xpath '/html', output, 1
    +      assert_xpath '/html/head', output, 1
    +      assert_xpath '/html/body', output, 1
    +      assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
    +      assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
    +    end
    +
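The three conversions above draw the line between embedded and standalone output. A compact sketch of the same distinction, using an illustrative input string:

  require 'asciidoctor'

  # Embedded: body content only, no <html>/<head> wrapper (the default for string input)
  embedded = Asciidoctor.convert 'Hello, *AsciiDoc*!'

  # Standalone: full HTML document with <head>, title, and stylesheet handling
  standalone = Asciidoctor.convert 'Hello, *AsciiDoc*!', standalone: true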
    +    test 'lines in output should be separated by line feed' do
    +      sample_input_path = fixture_path('sample.adoc')
    +
    +      output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false
    +      refute_empty output
    +      lines = output.split("\n")
    +      assert_equal lines.size, output.split(/\r\n|\r|\n/).size
    +      assert_equal lines.map(&:length), lines.map(&:rstrip).map(&:length)
    +    end
    +
    +    test 'should accept attributes as array' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      output = Asciidoctor.convert_file sample_input_path, attributes: %w(sectnums idprefix idseparator=-), to_file: false
    +      assert_css '#section-a', output, 1
    +    end
    +
    +    test 'should accept attributes as string' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      output = Asciidoctor.convert_file sample_input_path, attributes: 'sectnums idprefix idseparator=-', to_file: false
    +      assert_css '#section-a', output, 1
    +    end
    +
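As the two tests above (and the Load tests earlier) show, attributes may be supplied in several equivalent forms. A sketch using the same sample.adoc fixture these tests rely on:

  require 'asciidoctor'

  # Array, String, and Hash forms all yield the same attribute set
  Asciidoctor.convert_file 'sample.adoc', to_file: false, attributes: %w(sectnums idprefix idseparator=-)
  Asciidoctor.convert_file 'sample.adoc', to_file: false, attributes: 'sectnums idprefix idseparator=-'
  Asciidoctor.convert_file 'sample.adoc', to_file: false, attributes: { 'sectnums' => '', 'idprefix' => '', 'idseparator' => '-' }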
    +    test 'should link to default stylesheet by default when safe mode is SECURE or greater' do
    +      sample_input_path = fixture_path('basic.adoc')
    +      output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false
    +      assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
    +      assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1
    +    end
    +
    +    test 'should embed default stylesheet by default if SafeMode is less than SECURE' do
    +      input = <<~'EOS'
    +      = Document Title
    +
    +      text
    +      EOS
    +
    +      output = Asciidoctor.convert input, safe: Asciidoctor::SafeMode::SERVER, standalone: true
    +      assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
    +      assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 0
    +      stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
    +      styles = stylenode.content
    +      refute_nil styles
    +      refute_empty styles.strip
    +    end
    +
    +    test 'should not allow linkcss be unset from document if SafeMode is SECURE or greater' do
    +      input = <<~'EOS'
    +      = Document Title
    +      :linkcss!:
    +
    +      text
    +      EOS
    +
    +      output = Asciidoctor.convert input, standalone: true
    +      assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
    +      assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1
    +    end
    +
    +    test 'should embed default stylesheet if linkcss is unset from API and SafeMode is SECURE or greater' do
    +      input = <<~'EOS'
    +      = Document Title
    +
    +      text
    +      EOS
    +
    +      #[{ 'linkcss!' => '' }, { 'linkcss' => nil }, { 'linkcss' => false }].each do |attrs|
    +      [{ 'linkcss!' => '' }, { 'linkcss' => nil }].each do |attrs|
    +        output = Asciidoctor.convert input, standalone: true, attributes: attrs
    +        assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
    +        assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 0
    +        stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
    +        styles = stylenode.content
    +        refute_nil styles
    +        refute_empty styles.strip
    +      end
    +    end
    +
    +    test 'should embed default stylesheet if safe mode is less than SECURE and linkcss is unset from API' do
    +      sample_input_path = fixture_path('basic.adoc')
    +      output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false,
    +          safe: Asciidoctor::SafeMode::SAFE, attributes: { 'linkcss!' => '' }
    +      assert_css 'html:root > head > style', output, 1
    +      stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
    +      styles = stylenode.content
    +      refute_nil styles
    +      refute_empty styles.strip
    +    end
    +
    +    test 'should not link to stylesheet if stylesheet is unset' do
    +      input = <<~'EOS'
    +      = Document Title
    +
    +      text
    +      EOS
    +
    +      output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet!' => '' }
    +      assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0
    +      assert_css 'html:root > head > link[rel="stylesheet"]', output, 0
    +    end
    +
    +    test 'should link to custom stylesheet if specified in stylesheet attribute' do
    +      input = <<~'EOS'
    +      = Document Title
    +
    +      text
    +      EOS
    +
    +      output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet' => './custom.css' }
    +      assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0
    +      assert_css 'html:root > head > link[rel="stylesheet"][href="./custom.css"]', output, 1
    +
    +      output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet' => 'file:///home/username/custom.css' }
    +      assert_css 'html:root > head > link[rel="stylesheet"][href="file:///home/username/custom.css"]', output, 1
    +    end
    +
    +    test 'should resolve custom stylesheet relative to stylesdir' do
    +      input = <<~'EOS'
    +      = Document Title
    +
    +      text
    +      EOS
    +
    +      output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets' }
    +      assert_css 'html:root > head > link[rel="stylesheet"][href="./stylesheets/custom.css"]', output, 1
    +    end
    +
    +    test 'should resolve custom stylesheet to embed relative to stylesdir' do
    +      sample_input_path = fixture_path('basic.adoc')
    +      output = Asciidoctor.convert_file sample_input_path, standalone: true, safe: Asciidoctor::SafeMode::SAFE, to_file: false,
    +          attributes: { 'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets', 'linkcss!' => '' }
    +      stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
    +      styles = stylenode.content
    +      refute_nil styles
    +      refute_empty styles.strip
    +    end
    +
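The stylesheet tests above pin down how linkcss, stylesheet, and stylesdir interact. A sketch of the two common cases, with illustrative file paths:

  require 'asciidoctor'

  # Link a custom stylesheet resolved against stylesdir (the SECURE default links rather than embeds)
  Asciidoctor.convert '= Title', standalone: true,
    attributes: { 'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets' }

  # Embed the custom stylesheet instead by unsetting linkcss in SAFE mode
  Asciidoctor.convert '= Title', standalone: true, safe: :safe,
    attributes: { 'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets', 'linkcss!' => '' }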
    +    test 'should convert source file and write result to adjacent file by default' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      sample_output_path = fixture_path('sample.html')
    +      begin
    +        Asciidoctor.convert_file sample_input_path
    +        assert File.exist?(sample_output_path)
    +        output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
    +        refute_empty output
    +        assert_xpath '/html', output, 1
    +        assert_xpath '/html/head', output, 1
    +        assert_xpath '/html/body', output, 1
    +        assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
    +        assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
    +      ensure
    +        FileUtils.rm(sample_output_path)
    +      end
    +    end
    +
    +    test 'should convert source file specified by pathname and write result to adjacent file by default' do
    +      sample_input_path = Pathname fixture_path 'sample.adoc'
    +      sample_output_path = Pathname fixture_path 'sample.html'
    +      begin
    +        doc = Asciidoctor.convert_file sample_input_path, safe: :safe
    +        assert_equal sample_output_path.expand_path.to_s, (doc.attr 'outfile')
    +        assert sample_output_path.file?
    +        output = sample_output_path.read mode: Asciidoctor::FILE_READ_MODE
    +        refute_empty output
    +        assert_xpath '/html', output, 1
    +        assert_xpath '/html/head', output, 1
    +        assert_xpath '/html/body', output, 1
    +        assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
    +        assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
    +      ensure
    +        sample_output_path.delete
    +      end
    +    end
    +
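A short sketch of the convert_file output targets exercised by the surrounding tests; the file names are illustrative:

  require 'asciidoctor'

  Asciidoctor.convert_file 'sample.adoc', safe: :safe                                # writes sample.html next to the input
  Asciidoctor.convert_file 'sample.adoc', safe: :safe, to_file: false                # returns the HTML string instead
  Asciidoctor.convert_file 'sample.adoc', safe: :safe, to_dir: 'out', mkdirs: true   # writes out/sample.html, creating out/ if needed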
    +    test 'should convert source file and write to specified file' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      sample_output_path = fixture_path('result.html')
    +      begin
    +        Asciidoctor.convert_file sample_input_path, to_file: sample_output_path
    +        assert File.exist?(sample_output_path)
    +        output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
    +        refute_empty output
    +        assert_xpath '/html', output, 1
    +        assert_xpath '/html/head', output, 1
    +        assert_xpath '/html/body', output, 1
    +        assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
    +        assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
    +      ensure
    +        FileUtils.rm(sample_output_path)
    +      end
    +    end
    +
    +    test 'should convert source file and write to specified file in base_dir' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      sample_output_path = fixture_path('result.html')
    +      fixture_dir = fixture_path('')
    +      begin
    +        Asciidoctor.convert_file sample_input_path, to_file: 'result.html', base_dir: fixture_dir
    +        assert File.exist?(sample_output_path)
    +        output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
    +        refute_empty output
    +        assert_xpath '/html', output, 1
    +        assert_xpath '/html/head', output, 1
    +        assert_xpath '/html/body', output, 1
    +        assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
    +        assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
    +      rescue => e
    +        flunk e.message
    +      ensure
    +        FileUtils.rm(sample_output_path, force: true)
    +      end
    +    end
    +
    +    test 'in_place option is ignored when to_file is specified' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      sample_output_path = fixture_path('result.html')
    +      begin
    +        Asciidoctor.convert_file sample_input_path, to_file: sample_output_path, in_place: true
    +        assert File.exist?(sample_output_path)
    +      ensure
    +        FileUtils.rm(sample_output_path) if File.exist? sample_output_path
    +      end
    +    end
    +
    +    test 'in_place option is ignored when to_dir is specified' do
    +      sample_input_path = fixture_path('sample.adoc')
    +      sample_output_path = fixture_path('sample.html')
    +      begin
    +        Asciidoctor.convert_file sample_input_path, to_dir: File.dirname(sample_output_path), in_place: true
    +        assert File.exist?(sample_output_path)
    +      ensure
    +        FileUtils.rm(sample_output_path) if File.exist? sample_output_path
    +      end
    +    end
    +
    +    test 'should set outfilesuffix to match file extension of target file' do
    +      sample_input = '{outfilesuffix}'
    +      sample_output_path = fixture_path('result.htm')
    +      begin
    +        Asciidoctor.convert sample_input, to_file: sample_output_path
    +        assert File.exist?(sample_output_path)
    +        output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
    +        refute_empty output
+        assert_include '<p>.htm</p>
    ', output + ensure + FileUtils.rm(sample_output_path) + end + end + + test 'should respect outfilesuffix soft set from API' do + sample_input_path = fixture_path('sample.adoc') + sample_output_path = fixture_path('sample.htm') + begin + Asciidoctor.convert_file sample_input_path, to_dir: (File.dirname sample_input_path), attributes: { 'outfilesuffix' => '.htm@' } + assert File.exist?(sample_output_path) + ensure + FileUtils.rm(sample_output_path) + end + end + + test 'output should be relative to to_dir option' do + sample_input_path = fixture_path('sample.adoc') + output_dir = File.join(File.dirname(sample_input_path), 'test_output') + Dir.mkdir output_dir if !File.exist? output_dir + sample_output_path = File.join(output_dir, 'sample.html') + begin + Asciidoctor.convert_file sample_input_path, to_dir: output_dir + assert File.exist? sample_output_path + ensure + FileUtils.rm(sample_output_path) if File.exist? sample_output_path + FileUtils.rmdir output_dir + end + end + + test 'missing directories should be created if mkdirs is enabled' do + sample_input_path = fixture_path('sample.adoc') + output_dir = File.join(File.join(File.dirname(sample_input_path), 'test_output'), 'subdir') + sample_output_path = File.join(output_dir, 'sample.html') + begin + Asciidoctor.convert_file sample_input_path, to_dir: output_dir, mkdirs: true + assert File.exist? sample_output_path + ensure + FileUtils.rm(sample_output_path) if File.exist? sample_output_path + FileUtils.rmdir output_dir + FileUtils.rmdir File.dirname(output_dir) + end + end + + # TODO need similar test for when to_dir is specified + test 'should raise exception if an attempt is made to overwrite input file' do + sample_input_path = fixture_path('sample.adoc') + + assert_raises IOError do + Asciidoctor.convert_file sample_input_path, attributes: { 'outfilesuffix' => '.adoc' } + end + end + + test 'to_file should be relative to to_dir when both given' do + sample_input_path = fixture_path('sample.adoc') + base_dir = File.dirname(sample_input_path) + sample_rel_output_path = File.join('test_output', 'result.html') + output_dir = File.dirname(File.join(base_dir, sample_rel_output_path)) + Dir.mkdir output_dir if !File.exist? output_dir + sample_output_path = File.join(base_dir, sample_rel_output_path) + begin + Asciidoctor.convert_file sample_input_path, to_dir: base_dir, to_file: sample_rel_output_path + assert File.exist? sample_output_path + ensure + FileUtils.rm(sample_output_path) if File.exist? 
sample_output_path + FileUtils.rmdir output_dir + end + end + + test 'should not modify options argument' do + options = { + safe: Asciidoctor::SafeMode::SAFE, + to_file: false, + } + options.freeze + sample_input_path = fixture_path('sample.adoc') + begin + Asciidoctor.convert_file sample_input_path, options + rescue + flunk %(options argument should not be modified) + end + end + + test 'should set to_dir option to parent directory of specified output file' do + sample_input_path = fixture_path 'basic.adoc' + sample_output_path = fixture_path 'basic.html' + begin + doc = Asciidoctor.convert_file sample_input_path, to_file: sample_output_path + assert_equal File.dirname(sample_output_path), doc.options[:to_dir] + ensure + FileUtils.rm(sample_output_path) + end + end + + test 'should set to_dir option to parent directory of specified output directory and file' do + sample_input_path = fixture_path 'basic.adoc' + sample_output_path = fixture_path 'basic.html' + fixture_base_path = File.dirname sample_output_path + fixture_parent_path = File.dirname fixture_base_path + sample_output_relpath = File.join 'fixtures', 'basic.html' + begin + doc = Asciidoctor.convert_file sample_input_path, to_dir: fixture_parent_path, to_file: sample_output_relpath + assert_equal fixture_base_path, doc.options[:to_dir] + ensure + FileUtils.rm(sample_output_path) + end + end + + test 'timings are recorded for each step' do + sample_input_path = fixture_path 'asciidoc_index.txt' + Asciidoctor.convert_file sample_input_path, timings: (timings = Asciidoctor::Timings.new), to_file: false + refute_equal '0.00000', '%05.5f' % timings.read_parse.to_f + refute_equal '0.00000', '%05.5f' % timings.convert.to_f + refute_equal timings.read_parse, timings.total + end + + test 'can override syntax highlighter using syntax_highlighters option' do + syntax_hl = Class.new Asciidoctor::SyntaxHighlighter::Base do + def highlight? + true + end + + def highlight node, source, lang, opts + 'highlighted' + end + end + input = <<~'EOS' + [source,ruby] + ---- + puts 'Hello, World!' + ---- + EOS + output = Asciidoctor.convert input, safe: :safe, syntax_highlighters: { 'coderay' => syntax_hl }, attributes: { 'source-highlighter' => 'coderay' } + assert_css 'pre.highlight > code[data-lang="ruby"]', output, 1 + assert_xpath '//pre[@class="coderay highlight"]/code[text()="highlighted"]', output, 1 + end + end + + context 'AST' do + test 'with no author' do + input = <<~'EOS' + = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application + + Getting Real details the business, design, programming, and marketing principles of 37signals. + EOS + + doc = document_from_string input + assert_equal 0, doc.authors.size + end + + test 'with one author' do + input = <<~'EOS' + = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application + David Heinemeier Hansson + + Getting Real details the business, design, programming, and marketing principles of 37signals. 
+ EOS + + doc = document_from_string input + authors = doc.authors + assert_equal 1, authors.size + author_1 = authors[0] + assert_equal 'david@37signals.com', author_1.email + assert_equal 'David Heinemeier Hansson', author_1.name + assert_equal 'David', author_1.firstname + assert_equal 'Heinemeier', author_1.middlename + assert_equal 'Hansson', author_1.lastname + assert_equal 'DHH', author_1.initials + end + + test 'with two authors' do + input = <<~'EOS' + = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application + David Heinemeier Hansson ; Jason Fried + + Getting Real details the business, design, programming, and marketing principles of 37signals. + EOS + + doc = document_from_string input + authors = doc.authors + assert_equal 2, authors.size + author_1 = authors[0] + assert_equal 'david@37signals.com', author_1.email + assert_equal 'David Heinemeier Hansson', author_1.name + assert_equal 'David', author_1.firstname + assert_equal 'Heinemeier', author_1.middlename + assert_equal 'Hansson', author_1.lastname + assert_equal 'DHH', author_1.initials + author_2 = authors[1] + assert_equal 'jason@37signals.com', author_2.email + assert_equal 'Jason Fried', author_2.name + assert_equal 'Jason', author_2.firstname + assert_nil author_2.middlename + assert_equal 'Fried', author_2.lastname + assert_equal 'JF', author_2.initials + end + + test 'with authors as attributes' do + input = <<~'EOS' + = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application + :author_1: David Heinemeier Hansson + :email_1: david@37signals.com + :author_2: Jason Fried + :email_2: jason@37signals.com + + Getting Real details the business, design, programming, and marketing principles of 37signals. + EOS + + doc = document_from_string input + authors = doc.authors + assert_equal 2, authors.size + author_1 = authors[0] + assert_equal 'david@37signals.com', author_1.email + assert_equal 'David Heinemeier Hansson', author_1.name + assert_equal 'David', author_1.firstname + assert_equal 'Heinemeier', author_1.middlename + assert_equal 'Hansson', author_1.lastname + assert_equal 'DHH', author_1.initials + author_2 = authors[1] + assert_equal 'jason@37signals.com', author_2.email + assert_equal 'Jason Fried', author_2.name + assert_equal 'Jason', author_2.firstname + assert_nil author_2.middlename + assert_equal 'Fried', author_2.lastname + assert_equal 'JF', author_2.initials + end + + test 'should not crash if nil cell text is passed to Cell constructor' do + input = <<~'EOS' + |=== + |a + |=== + EOS + table = (document_from_string input).blocks[0] + cell = Asciidoctor::Table::Cell.new table.rows.body[0][0].column, nil, {} + refute cell.style + assert_same Asciidoctor::AbstractNode::NORMAL_SUBS, cell.subs + assert_equal '', cell.text + end + + test 'should set option on node when set_option is called' do + input = <<~'EOS' + . three + . two + . one + EOS + + block = (document_from_string input).blocks[0] + block.set_option('reversed') + assert block.option? 'reversed' + assert_equal '', block.attributes['reversed-option'] + end + + test 'enabled_options should return all options which are set' do + input = <<~'EOS' + [%interactive] + * [x] code + * [ ] test + * [ ] profit + EOS + + block = (document_from_string input).blocks[0] + assert_equal %w(checklist interactive).to_set, block.enabled_options + end + + test 'should append option to existing options' do + input = <<~'EOS' + [%fancy] + . three + . two + . 
one + EOS + + block = (document_from_string input).blocks[0] + block.set_option('reversed') + assert block.option? 'fancy' + assert block.option? 'reversed' + end + + test 'should not append option if option is already set' do + input = <<~'EOS' + [%reversed] + . three + . two + . one + EOS + + block = (document_from_string input).blocks[0] + refute block.set_option('reversed') + assert_equal '', block.attributes['reversed-option'] + end + + test 'should return set of option names' do + input = <<~'EOS' + [%compact%reversed] + . three + . two + . one + EOS + + block = (document_from_string input).blocks[0] + assert_equal %w(compact reversed).to_set, block.enabled_options + end + + test 'table column should not be a block or inline' do + input = <<~'EOS' + |=== + |a + |=== + EOS + + col = (document_from_string input).blocks[0].columns[0] + refute col.block? + refute col.inline? + end + + test 'table cell should be a block' do + input = <<~'EOS' + |=== + |a + |=== + EOS + + cell = (document_from_string input).blocks[0].rows.body[0][0] + assert_kind_of ::Asciidoctor::AbstractBlock, cell + assert cell.block? + refute cell.inline? + end + + test 'next_adjacent_block should return next block' do + input = <<~'EOS' + first + + second + EOS + + doc = document_from_string input + assert_equal doc.blocks[1], doc.blocks[0].next_adjacent_block + end + + test 'next_adjacent_block should return next sibling of parent if called on last sibling' do + input = <<~'EOS' + -- + first + -- + + second + EOS + + doc = document_from_string input + assert_equal doc.blocks[1], doc.blocks[0].blocks[0].next_adjacent_block + end + + test 'next_adjacent_block should return next sibling of list if called on last item' do + input = <<~'EOS' + * first + + second + EOS + + doc = document_from_string input + assert_equal doc.blocks[1], doc.blocks[0].blocks[0].next_adjacent_block + end + + test 'next_adjacent_block should return next item in dlist if called on last block of list item' do + input = <<~'EOS' + first:: + desc + + + more desc + + second:: + desc + EOS + + doc = document_from_string input + assert_equal doc.blocks[0].items[1], doc.blocks[0].items[0][1].blocks[0].next_adjacent_block + end + end +end diff -Nru asciidoctor-1.5.5/test/attribute_list_test.rb asciidoctor-2.0.10/test/attribute_list_test.rb --- asciidoctor-1.5.5/test/attribute_list_test.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/attribute_list_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,247 @@ +# frozen_string_literal: true +require_relative 'test_helper' + +context 'AttributeList' do + test 'collect unnamed attribute' do + attributes = {} + line = 'quote' + expected = { 1 => 'quote' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attribute double-quoted' do + attributes = {} + line = '"quote"' + expected = { 1 => 'quote' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect empty unnamed attribute double-quoted' do + attributes = {} + line = '""' + expected = { 1 => '' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attribute double-quoted containing escaped quote' do + attributes = {} + line = '"ba\"zaar"' + expected = { 1 => 'ba"zaar' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attribute 
single-quoted' do + attributes = {} + line = '\'quote\'' + expected = { 1 => 'quote' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect empty unnamed attribute single-quoted' do + attributes = {} + line = '\'\'' + expected = { 1 => '' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect isolated single quote positional attribute' do + attributes = {} + line = '\'' + expected = { 1 => '\'' } + doc = empty_document + def doc.apply_subs *args + fail 'apply_subs should not be called' + end + Asciidoctor::AttributeList.new(line, doc).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect isolated single quote attribute value' do + attributes = {} + line = 'name=\'' + expected = { 'name' => '\'' } + doc = empty_document + def doc.apply_subs *args + fail 'apply_subs should not be called' + end + Asciidoctor::AttributeList.new(line, doc).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect attribute value as is if it has only leading single quote' do + attributes = {} + line = 'name=\'{val}' + expected = { 'name' => '\'{val}' } + doc = empty_document attributes: { 'val' => 'val' } + def doc.apply_subs *args + fail 'apply_subs should not be called' + end + Asciidoctor::AttributeList.new(line, doc).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attribute single-quoted containing escaped quote' do + attributes = {} + line = '\'ba\\\'zaar\'' + expected = { 1 => 'ba\'zaar' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attribute with dangling delimiter' do + attributes = {} + line = 'quote , ' + expected = { 1 => 'quote' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attribute in second position after empty attribute' do + attributes = {} + line = ', John Smith' + expected = { 1 => nil, 2 => 'John Smith' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect unnamed attributes' do + attributes = {} + line = 'first, second one, third' + expected = { 1 => 'first', 2 => 'second one', 3 => 'third' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attribute' do + attributes = {} + line = 'foo=bar' + expected = { 'foo' => 'bar' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attribute double-quoted' do + attributes = {} + line = 'foo="bar"' + expected = { 'foo' => 'bar' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attribute with double-quoted empty value' do + attributes = {} + line = 'height=100,caption="",link="images/octocat.png"' + expected = { 'height' => '100', 'caption' => '', 'link' => 'images/octocat.png' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attribute single-quoted' do + attributes = {} + line = 'foo=\'bar\'' + expected = { 'foo' => 'bar' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attribute with 
single-quoted empty value' do + attributes = {} + line = %(height=100,caption='',link='images/octocat.png') + expected = { 'height' => '100', 'caption' => '', 'link' => 'images/octocat.png' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect single named attribute with empty value' do + attributes = {} + line = 'foo=' + expected = { 'foo' => '' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect single named attribute with empty value when followed by other attributes' do + attributes = {} + line = 'foo=,bar=baz' + expected = { 'foo' => '', 'bar' => 'baz' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attributes unquoted' do + attributes = {} + line = 'first=value, second=two, third=3' + expected = { 'first' => 'value', 'second' => 'two', 'third' => '3' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attributes quoted' do + attributes = {} + line = %(first='value', second="value two", third=three) + expected = { 'first' => 'value', 'second' => 'value two', 'third' => 'three' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect named attributes quoted containing non-semantic spaces' do + attributes = {} + line = %( first = 'value', second ="value two" , third= three ) + expected = { 'first' => 'value', 'second' => 'value two', 'third' => 'three' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect mixed named and unnamed attributes' do + attributes = {} + line = %(first, second="value two", third=three, Sherlock Holmes) + expected = { 1 => 'first', 'second' => 'value two', 'third' => 'three', 4 => 'Sherlock Holmes' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect options attribute' do + attributes = {} + line = %(quote, options='opt1,,opt2 , opt3') + expected = { 1 => 'quote', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => '' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect opts attribute as options' do + attributes = {} + line = %(quote, opts='opt1,,opt2 , opt3') + expected = { 1 => 'quote', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => '' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'should ignore options attribute if empty' do + attributes = {} + line = %(quote, opts=) + expected = { 1 => 'quote' } + Asciidoctor::AttributeList.new(line).parse_into(attributes) + assert_equal expected, attributes + end + + test 'collect and rekey unnamed attributes' do + attributes = {} + line = 'first, second one, third, fourth' + expected = { 1 => 'first', 2 => 'second one', 3 => 'third', 4 => 'fourth', 'a' => 'first', 'b' => 'second one', 'c' => 'third' } + Asciidoctor::AttributeList.new(line).parse_into(attributes, ['a', 'b', 'c']) + assert_equal expected, attributes + end + + test 'rekey positional attributes' do + attributes = { 1 => 'source', 2 => 'java' } + expected = { 1 => 'source', 2 => 'java', 'style' => 'source', 'language' => 'java' } + Asciidoctor::AttributeList.rekey(attributes, ['style', 'language', 
'linenums']) + assert_equal expected, attributes + end +end diff -Nru asciidoctor-1.5.5/test/attributes_test.rb asciidoctor-2.0.10/test/attributes_test.rb --- asciidoctor-1.5.5/test/attributes_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/attributes_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,10 +1,17 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Attributes' do + default_logger = Asciidoctor::LoggerManager.logger + + setup do + Asciidoctor::LoggerManager.logger = (@logger = Asciidoctor::MemoryLogger.new) + end + + teardown do + Asciidoctor::LoggerManager.logger = default_logger + end + context 'Assignment' do test 'creates an attribute' do doc = document_from_string(':frog: Tanglefoot') @@ -13,67 +20,108 @@ test 'requires a space after colon following attribute name' do doc = document_from_string 'foo:bar' - assert_equal nil, doc.attributes['foo'] + assert_nil doc.attributes['foo'] + end + + # NOTE AsciiDoc Python recognizes this entry + test 'does not recognize attribute entry if name contains colon' do + input = ':foo:bar: baz' + doc = document_from_string input + refute doc.attr?('foo:bar') + assert_equal 1, doc.blocks.size + assert_equal :paragraph, doc.blocks[0].context + end + + # NOTE AsciiDoc Python recognizes this entry + test 'does not recognize attribute entry if name ends with colon' do + input = ':foo:: bar' + doc = document_from_string input + refute doc.attr?('foo:') + assert_equal 1, doc.blocks.size + assert_equal :dlist, doc.blocks[0].context + end + + # NOTE AsciiDoc Python does not recognize this entry + test 'allows any word character defined by Unicode in an attribute name' do + [['café', 'a coffee shop'], ['سمن', %(سازمان مردمنهاد)]].each do |(name, value)| + str = <<~EOS + :#{name}: #{value} + + {#{name}} + EOS + result = convert_string_to_embedded str + assert_includes result, %(

    <p>
    #{value}
    </p>) + end end
set via API' do - doc = document_from_string(":frog: Tanglefoot", :attributes => {'!frog' => ''}) - assert_equal nil, doc.attributes['frog'] + doc = document_from_string(":frog: Tanglefoot", attributes: { '!frog' => '' }) + assert_nil doc.attributes['frog'] end test 'should delete an attribute set via API to nil value' do - doc = document_from_string(":frog: Tanglefoot", :attributes => {'frog' => nil}) - assert_equal nil, doc.attributes['frog'] + doc = document_from_string(":frog: Tanglefoot", attributes: { 'frog' => nil }) + assert_nil doc.attributes['frog'] end test "doesn't choke when deleting a non-existing attribute" do doc = document_from_string(':frog!:') - assert_equal nil, doc.attributes['frog'] + assert_nil doc.attributes['frog'] end test "replaces special characters in attribute value" do @@ -86,122 +134,120 @@ assert_equal 'Asciidoctor 1.0', doc.attributes['release'] end - test "assigns attribute to empty string if substitution fails to resolve attribute" do - doc = document_from_string ":release: Asciidoctor {version}", :attributes => { 'attribute-missing' => 'drop-line' } - assert_equal '', doc.attributes['release'] + test 'assigns attribute to empty string if substitution fails to resolve attribute' do + input = ':release: Asciidoctor {version}' + document_from_string input, attributes: { 'attribute-missing' => 'drop-line' } + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: version' end - test "assigns multi-line attribute to empty string if substitution fails to resolve attribute" do - doc = document_from_string ":release: Asciidoctor +\n {version}", :attributes => { 'attribute-missing' => 'drop-line' } + test 'assigns multi-line attribute to empty string if substitution fails to resolve attribute' do + input = <<~'EOS' + :release: Asciidoctor + + {version} + EOS + doc = document_from_string input, attributes: { 'attribute-missing' => 'drop-line' } assert_equal '', doc.attributes['release'] + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: version' end test 'resolves attributes inside attribute value within header' do - input = <<-EOS -= Document Title -:big: big -:bigfoot: {big}foot + input = <<~'EOS' + = Document Title + :big: big + :bigfoot: {big}foot -{bigfoot} + {bigfoot} EOS - result = render_embedded_string input - assert result.include? 'bigfoot' + result = convert_string_to_embedded input + assert_includes result, 'bigfoot' end test 'resolves attributes and pass macro inside attribute value outside header' do - input = <<-EOS -= Document Title + input = <<~'EOS' + = Document Title -content + content -:big: pass:a,q[_big_] -:bigfoot: {big}foot -{bigfoot} + :big: pass:a,q[_big_] + :bigfoot: {big}foot + {bigfoot} EOS - result = render_embedded_string input - assert result.include? 
'bigfoot' + result = convert_string_to_embedded input + assert_includes result, 'bigfoot' end test 'should limit maximum size of attribute value if safe mode is SECURE' do expected = 'a' * 4096 - input = <<-EOS -:name: #{'a' * 5000} + input = <<~EOS + :name: #{'a' * 5000} -{name} + {name} EOS - result = render_embedded_string input, :doctype => :inline + result = convert_inline_string input assert_equal expected, result assert_equal 4096, result.bytesize end test 'should handle multibyte characters when limiting attribute value size' do expected = '日本' - input = <<-EOS -:name: 日本語 + input = <<~'EOS' + :name: 日本語 -{name} + {name} EOS - result = render_embedded_string input, :doctype => :inline, :attributes => { 'max-attribute-value-size' => 6 } + result = convert_inline_string input, attributes: { 'max-attribute-value-size' => 6 } assert_equal expected, result assert_equal 6, result.bytesize end test 'should not mangle multibyte characters when limiting attribute value size' do expected = '日本' - input = <<-EOS -:name: 日本語 + input = <<~'EOS' + :name: 日本語 -{name} + {name} EOS - result = render_embedded_string input, :doctype => :inline, :attributes => { 'max-attribute-value-size' => 8 } + result = convert_inline_string input, attributes: { 'max-attribute-value-size' => 8 } assert_equal expected, result assert_equal 6, result.bytesize end test 'should allow maximize size of attribute value to be disabled' do expected = 'a' * 5000 - input = <<-EOS -:name: #{'a' * 5000} + input = <<~EOS + :name: #{'a' * 5000} -{name} + {name} EOS - result = render_embedded_string input, :doctype => :inline, :attributes => { 'max-attribute-value-size' => nil } + result = convert_inline_string input, attributes: { 'max-attribute-value-size' => nil } assert_equal expected, result assert_equal 5000, result.bytesize end test 'resolves user-home attribute if safe mode is less than SERVER' do - input = <<-EOS -:imagesdir: {user-home}/etc/images + input = <<~'EOS' + :imagesdir: {user-home}/etc/images -{imagesdir} -EOS - output = render_embedded_string input, :doctype => :inline, :safe => :safe - if RUBY_VERSION >= '1.9' - assert_equal %(#{Dir.home}/etc/images), output - else - assert_equal %(#{ENV['HOME']}/etc/images), output - end + {imagesdir} + EOS + output = convert_inline_string input, safe: :safe + assert_equal %(#{Asciidoctor::USER_HOME}/etc/images), output end test 'user-home attribute resolves to . 
if safe mode is SERVER or greater' do - input = <<-EOS -:imagesdir: {user-home}/etc/images + input = <<~'EOS' + :imagesdir: {user-home}/etc/images -{imagesdir} -EOS - output = render_embedded_string input, :doctype => :inline, :safe => :server - if RUBY_VERSION >= '1.9' - assert_equal %(./etc/images), output - else - assert_equal %(./etc/images), output - end + {imagesdir} + EOS + output = convert_inline_string input, safe: :server + assert_equal './etc/images', output end test "apply custom substitutions to text in passthrough macro and assign to attribute" do @@ -211,50 +257,114 @@ assert_equal '<>&', doc.attributes['xml-busters'] doc = document_from_string(":xml-busters: pass:specialcharacters[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] + doc = document_from_string(":xml-busters: pass:n,-c[<(C)>]") + assert_equal '<©>', doc.attributes['xml-busters'] + end + + test 'should not recognize pass macro with invalid substitution list in attribute value' do + [',', '42', 'a,'].each do |subs| + doc = document_from_string %(:pass-fail: pass:#{subs}[whale]) + assert_equal %(pass:#{subs}[whale]), doc.attributes['pass-fail'] + end end test "attribute is treated as defined until it's not" do - input = <<-EOS -:holygrail: -ifdef::holygrail[] -The holy grail has been found! -endif::holygrail[] - -:holygrail!: -ifndef::holygrail[] -Buggers! What happened to the grail? -endif::holygrail[] + input = <<~'EOS' + :holygrail: + ifdef::holygrail[] + The holy grail has been found! + endif::holygrail[] + + :holygrail!: + ifndef::holygrail[] + Buggers! What happened to the grail? + endif::holygrail[] EOS - output = render_string input + output = convert_string input assert_xpath '//p', output, 2 assert_xpath '(//p)[1][text() = "The holy grail has been found!"]', output, 1 assert_xpath '(//p)[2][text() = "Buggers! What happened to the grail?"]', output, 1 end - # Validates requirement: "Header attributes are overridden by command-line attributes." - test 'attribute defined in document options overrides attribute in document' do - doc = document_from_string(':cash: money', :attributes => {'cash' => 'heroes'}) + test 'attribute set via API overrides attribute set in document' do + doc = document_from_string(':cash: money', attributes: { 'cash' => 'heroes' }) assert_equal 'heroes', doc.attributes['cash'] end - test 'attribute defined in document options cannot be unassigned in document' do - doc = document_from_string(':cash!:', :attributes => {'cash' => 'heroes'}) + test 'attribute set via API cannot be unset by document' do + doc = document_from_string(':cash!:', attributes: { 'cash' => 'heroes' }) assert_equal 'heroes', doc.attributes['cash'] end - test 'attribute undefined in document options cannot be assigned in document' do - doc = document_from_string(':cash: money', :attributes => {'cash!' 
=> '' }) - assert_equal nil, doc.attributes['cash'] - doc = document_from_string(':cash: money', :attributes => {'cash' => nil }) - assert_equal nil, doc.attributes['cash'] + test 'attribute soft set via API using modifier on name can be overridden by document' do + doc = document_from_string(':cash: money', attributes: { 'cash@' => 'heroes' }) + assert_equal 'money', doc.attributes['cash'] + end + + test 'attribute soft set via API using modifier on value can be overridden by document' do + doc = document_from_string(':cash: money', attributes: { 'cash' => 'heroes@' }) + assert_equal 'money', doc.attributes['cash'] + end + + test 'attribute soft set via API using modifier on name can be unset by document' do + doc = document_from_string(':cash!:', attributes: { 'cash@' => 'heroes' }) + assert_nil doc.attributes['cash'] + doc = document_from_string(':cash!:', attributes: { 'cash@' => true }) + assert_nil doc.attributes['cash'] + end + + test 'attribute soft set via API using modifier on value can be unset by document' do + doc = document_from_string(':cash!:', attributes: { 'cash' => 'heroes@' }) + assert_nil doc.attributes['cash'] + end + + test 'attribute unset via API cannot be set by document' do + [ + { 'cash!' => '' }, + { '!cash' => '' }, + { 'cash' => nil }, + ].each do |attributes| + doc = document_from_string(':cash: money', attributes: attributes) + assert_nil doc.attributes['cash'] + end + end + + test 'attribute soft unset via API can be set by document' do + [ + { 'cash!@' => '' }, + { '!cash@' => '' }, + { 'cash!' => '@' }, + { '!cash' => '@' }, + { 'cash' => false }, + ].each do |attributes| + doc = document_from_string(':cash: money', attributes: attributes) + assert_equal 'money', doc.attributes['cash'] + end + end + + test 'can soft unset built-in attribute from API and still override in document' do + [ + { 'sectids!@' => '' }, + { '!sectids@' => '' }, + { 'sectids!' => '@' }, + { '!sectids' => '@' }, + { 'sectids' => false }, + ].each do |attributes| + doc = document_from_string '== Heading', attributes: attributes + refute doc.attr?('sectids') + assert_css '#_heading', (doc.convert standalone: false), 0 + doc = document_from_string %(:sectids:\n\n== Heading), attributes: attributes + assert doc.attr?('sectids') + assert_css '#_heading', (doc.convert standalone: false), 1 + end end test 'backend and doctype attributes are set by default in default configuration' do - input = <<-EOS -= Document Title -Author Name + input = <<~'EOS' + = Document Title + Author Name -content + content EOS doc = document_from_string input @@ -269,7 +379,7 @@ 'doctype' => 'article', 'doctype-article' => '', 'filetype' => 'html', - 'filetype-html' => '' + 'filetype-html' => '', } expect.each do |key, val| assert doc.attributes.key? key @@ -278,14 +388,14 @@ end test 'backend and doctype attributes are set by default in custom configuration' do - input = <<-EOS -= Document Title -Author Name + input = <<~'EOS' + = Document Title + Author Name -content + content EOS - doc = document_from_string input, :doctype => 'book', :backend => 'docbook' + doc = document_from_string input, doctype: 'book', backend: 'docbook' expect = { 'backend' => 'docbook5', 'backend-docbook5' => '', @@ -297,7 +407,7 @@ 'doctype' => 'book', 'doctype-book' => '', 'filetype' => 'xml', - 'filetype-xml' => '' + 'filetype-xml' => '', } expect.each do |key, val| assert doc.attributes.key? 
key @@ -306,16 +416,16 @@ end test 'backend attributes are updated if backend attribute is defined in document and safe mode is less than SERVER' do - input = <<-EOS -= Document Title -Author Name -:backend: docbook -:doctype: book + input = <<~'EOS' + = Document Title + Author Name + :backend: docbook + :doctype: book -content + content EOS - doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE + doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE expect = { 'backend' => 'docbook5', 'backend-docbook5' => '', @@ -327,263 +437,364 @@ 'doctype' => 'book', 'doctype-book' => '', 'filetype' => 'xml', - 'filetype-xml' => '' + 'filetype-xml' => '', } expect.each do |key, val| assert doc.attributes.key?(key) assert_equal val, doc.attributes[key] end - assert !doc.attributes.key?('backend-html5') - assert !doc.attributes.key?('backend-html5-doctype-article') - assert !doc.attributes.key?('basebackend-html') - assert !doc.attributes.key?('basebackend-html-doctype-article') - assert !doc.attributes.key?('doctype-article') - assert !doc.attributes.key?('filetype-html') + refute doc.attributes.key?('backend-html5') + refute doc.attributes.key?('backend-html5-doctype-article') + refute doc.attributes.key?('basebackend-html') + refute doc.attributes.key?('basebackend-html-doctype-article') + refute doc.attributes.key?('doctype-article') + refute doc.attributes.key?('filetype-html') end test 'backend attributes defined in document options overrides backend attribute in document' do - doc = document_from_string(':backend: docbook45', :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'backend' => 'html5'}) + doc = document_from_string(':backend: docbook5', safe: Asciidoctor::SafeMode::SAFE, attributes: { 'backend' => 'html5' }) assert_equal 'html5', doc.attributes['backend'] - assert doc.attributes.has_key? 'backend-html5' + assert doc.attributes.key? 'backend-html5' assert_equal 'html', doc.attributes['basebackend'] - assert doc.attributes.has_key? 'basebackend-html' + assert doc.attributes.key? 'basebackend-html' + end + + test 'can only access a positional attribute from the attributes hash' do + node = Asciidoctor::Block.new nil, :paragraph, attributes: { 1 => 'position 1' } + assert_nil node.attr(1) + refute node.attr?(1) + assert_equal 'position 1', node.attributes[1] + end + + test 'attr should not retrieve attribute from document if not set on block' do + doc = document_from_string 'paragraph', :attributes => { 'name' => 'value' } + para = doc.blocks[0] + assert_nil para.attr 'name' + end + + test 'attr looks for attribute on document if fallback name is true' do + doc = document_from_string 'paragraph', :attributes => { 'name' => 'value' } + para = doc.blocks[0] + assert_equal 'value', (para.attr 'name', nil, true) + end + + test 'attr uses fallback name when looking for attribute on document' do + doc = document_from_string 'paragraph', :attributes => { 'alt-name' => 'value' } + para = doc.blocks[0] + assert_equal 'value', (para.attr 'name', nil, 'alt-name') + end + + test 'attr? should not check for attribute on document if not set on block' do + doc = document_from_string 'paragraph', :attributes => { 'name' => 'value' } + para = doc.blocks[0] + refute para.attr? 'name' + end + + test 'attr? checks for attribute on document if fallback name is true' do + doc = document_from_string 'paragraph', :attributes => { 'name' => 'value' } + para = doc.blocks[0] + assert para.attr? 'name', nil, true + end + + test 'attr? 
checks for fallback name when looking for attribute on document' do + doc = document_from_string 'paragraph', :attributes => { 'alt-name' => 'value' } + para = doc.blocks[0] + assert para.attr? 'name', nil, 'alt-name' + end + + test 'set_attr should set value to empty string if no value is specified' do + node = Asciidoctor::Block.new nil, :paragraph, attributes: {} + node.set_attr 'foo' + assert_equal '', (node.attr 'foo') + end + + test 'remove_attr should remove attribute and return previous value' do + doc = empty_document + node = Asciidoctor::Block.new doc, :paragraph, attributes: { 'foo' => 'bar' } + assert_equal 'bar', (node.remove_attr 'foo') + assert_nil node.attr('foo') end test 'set_attr should not overwrite existing key if overwrite is false' do - node = Asciidoctor::Block.new nil, :paragraph, :attributes => { 'foo' => 'bar' } + node = Asciidoctor::Block.new nil, :paragraph, attributes: { 'foo' => 'bar' } assert_equal 'bar', (node.attr 'foo') node.set_attr 'foo', 'baz', false assert_equal 'bar', (node.attr 'foo') end test 'set_attr should overwrite existing key by default' do - node = Asciidoctor::Block.new nil, :paragraph, :attributes => { 'foo' => 'bar' } + node = Asciidoctor::Block.new nil, :paragraph, attributes: { 'foo' => 'bar' } assert_equal 'bar', (node.attr 'foo') node.set_attr 'foo', 'baz' assert_equal 'baz', (node.attr 'foo') end test 'set_attr should set header attribute in loaded document' do - input = <<-EOS -:uri: http://example.org + input = <<~'EOS' + :uri: http://example.org -{uri} + {uri} EOS - doc = Asciidoctor.load input, :attributes => { 'uri' => 'https://github.com' } + doc = Asciidoctor.load input, attributes: { 'uri' => 'https://github.com' } doc.set_attr 'uri', 'https://google.com' output = doc.convert assert_xpath '//a[@href="https://google.com"]', output, 1 end + test 'set_attribute should set attribute if key is not locked' do + doc = empty_document + refute doc.attr? 'foo' + res = doc.set_attribute 'foo', 'baz' + assert res + assert_equal 'baz', (doc.attr 'foo') + end + + test 'set_attribute should not set key if key is locked' do + doc = empty_document attributes: { 'foo' => 'bar' } + assert_equal 'bar', (doc.attr 'foo') + res = doc.set_attribute 'foo', 'baz' + refute res + assert_equal 'bar', (doc.attr 'foo') + end + + test 'set_attribute should update backend attributes' do + doc = empty_document attributes: { 'backend' => 'html5@' } + assert_equal '', (doc.attr 'backend-html5') + res = doc.set_attribute 'backend', 'docbook5' + assert res + refute doc.attr? 'backend-html5' + assert_equal '', (doc.attr 'backend-docbook5') + end + test 'verify toc attribute matrix' do - expected_data = <<-EOS -#attributes |toc|toc-position|toc-placement|toc-class -toc | |nil |auto |nil -toc=header | |nil |auto |nil -toc=beeboo | |nil |auto |nil -toc=left | |left |auto |toc2 -toc2 | |left |auto |toc2 -toc=right | |right |auto |toc2 -toc=preamble | |content |preamble |nil -toc=macro | |content |macro |nil -toc toc-placement=macro toc-position=left | |content |macro |nil -toc toc-placement! | |content |macro |nil + expected_data = <<~'EOS' + #attributes |toc|toc-position|toc-placement|toc-class + toc | |nil |auto |nil + toc=header | |nil |auto |nil + toc=beeboo | |nil |auto |nil + toc=left | |left |auto |toc2 + toc2 | |left |auto |toc2 + toc=right | |right |auto |toc2 + toc=preamble | |content |preamble |nil + toc=macro | |content |macro |nil + toc toc-placement=macro toc-position=left | |content |macro |nil + toc toc-placement! 
| |content |macro |nil EOS - expected = expected_data.strip.lines.map {|l| + expected = expected_data.lines.map do |l| next if l.start_with? '#' l.split('|').map {|e| (e = e.strip) == 'nil' ? nil : e } - }.compact + end.compact expected.each do |expect| raw_attrs, toc, toc_position, toc_placement, toc_class = expect - attrs = Hash[*(raw_attrs.split ' ').map {|e| e.include?('=') ? e.split('=') : [e, ''] }.flatten] - doc = document_from_string '', :attributes => attrs - toc ? (assert doc.attr?('toc', toc)) : (assert !doc.attr?('toc')) - toc_position ? (assert doc.attr?('toc-position', toc_position)) : (assert !doc.attr?('toc-position')) - toc_placement ? (assert doc.attr?('toc-placement', toc_placement)) : (assert !doc.attr?('toc-placement')) - toc_class ? (assert doc.attr?('toc-class', toc_class)) : (assert !doc.attr?('toc-class')) + attrs = Hash[*raw_attrs.split.map {|e| e.include?('=') ? e.split('=', 2) : [e, ''] }.flatten] + doc = document_from_string '', attributes: attrs + toc ? (assert doc.attr?('toc', toc)) : (refute doc.attr?('toc')) + toc_position ? (assert doc.attr?('toc-position', toc_position)) : (refute doc.attr?('toc-position')) + toc_placement ? (assert doc.attr?('toc-placement', toc_placement)) : (refute doc.attr?('toc-placement')) + toc_class ? (assert doc.attr?('toc-class', toc_class)) : (refute doc.attr?('toc-class')) end end end context 'Interpolation' do - test "render properly with simple names" do - html = render_string(":frog: Tanglefoot\n:my_super-hero: Spiderman\n\nYo, {frog}!\nBeat {my_super-hero}!") - result = Nokogiri::HTML(html) - assert_equal "Yo, Tanglefoot!\nBeat Spiderman!", result.css("p").first.content.strip + test "convert properly with simple names" do + html = convert_string(":frog: Tanglefoot\n:my_super-hero: Spiderman\n\nYo, {frog}!\nBeat {my_super-hero}!") + assert_xpath %(//p[text()="Yo, Tanglefoot!\nBeat Spiderman!"]), html, 1 end test 'attribute lookup is not case sensitive' do - input = <<-EOS -:He-Man: The most powerful man in the universe + input = <<~'EOS' + :He-Man: The most powerful man in the universe -He-Man: {He-Man} + He-Man: {He-Man} -She-Ra: {She-Ra} + She-Ra: {She-Ra} EOS - result = render_embedded_string input, :attributes => {'She-Ra' => 'The Princess of Power'} + result = convert_string_to_embedded input, attributes: { 'She-Ra' => 'The Princess of Power' } assert_xpath '//p[text()="He-Man: The most powerful man in the universe"]', result, 1 assert_xpath '//p[text()="She-Ra: The Princess of Power"]', result, 1 end - test "render properly with single character name" do - html = render_string(":r: Ruby\n\nR is for {r}!") - result = Nokogiri::HTML(html) - assert_equal 'R is for Ruby!', result.css("p").first.content.strip + test "convert properly with single character name" do + html = convert_string(":r: Ruby\n\nR is for {r}!") + assert_xpath %(//p[text()="R is for Ruby!"]), html, 1 end test "collapses spaces in attribute names" do - input = <<-EOS -Main Header -=========== -:My frog: Tanglefoot + input = <<~'EOS' + Main Header + =========== + :My frog: Tanglefoot -Yo, {myfrog}! + Yo, {myfrog}! EOS - output = render_string input + output = convert_string input assert_xpath '(//p)[1][text()="Yo, Tanglefoot!"]', output, 1 end - test "ignores lines with bad attributes if attribute-missing is drop-line" do - input = <<-EOS -:attribute-missing: drop-line + test 'ignores lines with bad attributes if attribute-missing is drop-line' do + input = <<~'EOS' + :attribute-missing: drop-line -This is -blah blah {foobarbaz} -all there is. 
+ This is + blah blah {foobarbaz} + all there is. EOS - html = render_embedded_string input - result = Nokogiri::HTML(html) - refute_match(/blah blah/m, result.css("p").first.content.strip) + output = convert_string_to_embedded input + para = xmlnodes_at_css 'p', output, 1 + refute_includes 'blah blah', para.content + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: foobarbaz' end - test "attribute value gets interpretted when rendering" do + test "attribute value gets interpretted when converting" do doc = document_from_string(":google: http://google.com[Google]\n\n{google}") assert_equal 'http://google.com[Google]', doc.attributes['google'] - output = doc.render + output = doc.convert assert_xpath '//a[@href="http://google.com"][text() = "Google"]', output, 1 end test 'should drop line with reference to missing attribute if attribute-missing attribute is drop-line' do - input = <<-EOS -:attribute-missing: drop-line + input = <<~'EOS' + :attribute-missing: drop-line -Line 1: This line should appear in the output. -Line 2: Oh no, a {bogus-attribute}! This line should not appear in the output. + Line 1: This line should appear in the output. + Line 2: Oh no, a {bogus-attribute}! This line should not appear in the output. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_match(/Line 1/, output) refute_match(/Line 2/, output) + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: bogus-attribute' end test 'should not drop line with reference to missing attribute by default' do - input = <<-EOS -Line 1: This line should appear in the output. -Line 2: A {bogus-attribute}! This time, this line should appear in the output. + input = <<~'EOS' + Line 1: This line should appear in the output. + Line 2: A {bogus-attribute}! This time, this line should appear in the output. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_match(/Line 1/, output) assert_match(/Line 2/, output) assert_match(/\{bogus-attribute\}/, output) end test 'should drop line with attribute unassignment by default' do - input = <<-EOS -:a: + input = <<~'EOS' + :a: -Line 1: This line should appear in the output. -Line 2: {set:a!}This line should not appear in the output. + Line 1: This line should appear in the output. + Line 2: {set:a!}This line should not appear in the output. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_match(/Line 1/, output) refute_match(/Line 2/, output) end test 'should not drop line with attribute unassignment if attribute-undefined is drop' do - input = <<-EOS -:attribute-undefined: drop -:a: + input = <<~'EOS' + :attribute-undefined: drop + :a: -Line 1: This line should appear in the output. -Line 2: {set:a!}This line should not appear in the output. + Line 1: This line should appear in the output. + Line 2: {set:a!}This line should appear in the output. 
EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_match(/Line 1/, output) assert_match(/Line 2/, output) refute_match(/\{set:a!\}/, output) end + test 'should drop line that only contains attribute assignment' do + input = <<~'EOS' + Line 1 + {set:a} + Line 2 + EOS + + output = convert_string_to_embedded input + assert_xpath %(//p[text()="Line 1\nLine 2"]), output, 1 + end + + test 'should drop line that only contains unresolved attribute when attribute-missing is drop' do + input = <<~'EOS' + Line 1 + {unresolved} + Line 2 + EOS + + output = convert_string_to_embedded input, attributes: { 'attribute-missing' => 'drop' } + assert_xpath %(//p[text()="Line 1\nLine 2"]), output, 1 + end + test "substitutes inside unordered list items" do - html = render_string(":foo: bar\n* snort at the {foo}\n* yawn") - result = Nokogiri::HTML(html) - assert_match(/snort at the bar/, result.css("li").first.content.strip) + html = convert_string(":foo: bar\n* snort at the {foo}\n* yawn") + assert_xpath %(//li/p[text()="snort at the bar"]), html, 1 end test 'substitutes inside section title' do - output = render_string(":prefix: Cool\n\n== {prefix} Title\n\ncontent") - result = Nokogiri::HTML(output) - assert_match(/Cool Title/, result.css('h2').first.content) - assert_match(/_cool_title/, result.css('h2').first.attr('id')) + output = convert_string(":prefix: Cool\n\n== {prefix} Title\n\ncontent") + assert_xpath '//h2[text()="Cool Title"]', output, 1 + assert_css 'h2#_cool_title', output, 1 end test 'interpolates attribute defined in header inside attribute entry in header' do - input = <<-EOS -= Title -Author Name -:attribute-a: value -:attribute-b: {attribute-a} + input = <<~'EOS' + = Title + Author Name + :attribute-a: value + :attribute-b: {attribute-a} -preamble + preamble EOS - doc = document_from_string(input, :parse_header_only => true) + doc = document_from_string(input, parse_header_only: true) assert_equal 'value', doc.attributes['attribute-b'] end test 'interpolates author attribute inside attribute entry in header' do - input = <<-EOS -= Title -Author Name -:name: {author} + input = <<~'EOS' + = Title + Author Name + :name: {author} -preamble + preamble EOS - doc = document_from_string(input, :parse_header_only => true) + doc = document_from_string(input, parse_header_only: true) assert_equal 'Author Name', doc.attributes['name'] end test 'interpolates revinfo attribute inside attribute entry in header' do - input = <<-EOS -= Title -Author Name -2013-01-01 -:date: {revdate} + input = <<~'EOS' + = Title + Author Name + 2013-01-01 + :date: {revdate} -preamble + preamble EOS - doc = document_from_string(input, :parse_header_only => true) + doc = document_from_string(input, parse_header_only: true) assert_equal '2013-01-01', doc.attributes['date'] end test 'attribute entries can resolve previously defined attributes' do - input = <<-EOS -= Title -Author Name -v1.0, 2010-01-01: First release! -:a: value -:a2: {a} -:revdate2: {revdate} + input = <<~'EOS' + = Title + Author Name + v1.0, 2010-01-01: First release! 
+ :a: value + :a2: {a} + :revdate2: {revdate} -{a} == {a2} + {a} == {a2} -{revdate} == {revdate2} + {revdate} == {revdate2} EOS doc = document_from_string input @@ -592,165 +803,178 @@ assert_equal 'value', doc.attr('a') assert_equal 'value', doc.attr('a2') - output = doc.render - assert output.include?('value == value') - assert output.include?('2010-01-01 == 2010-01-01') + output = doc.convert + assert_includes output, 'value == value' + assert_includes output, '2010-01-01 == 2010-01-01' + end + + test 'should warn if unterminated block comment is detected in document header' do + input = <<~'EOS' + = Document Title + :foo: bar + //// + :hey: there + + content + EOS + doc = document_from_string input + assert_nil doc.attr('hey') + assert_message @logger, :WARN, '<stdin>: line 3: unterminated comment block', Hash + end test 'substitutes inside block title' do - input = <<-EOS -:gem_name: asciidoctor + input = <<~'EOS' + :gem_name: asciidoctor -.Require the +{gem_name}+ gem -To use {gem_name}, the first thing to do is to import it in your Ruby source file. + .Require the +{gem_name}+ gem + To use {gem_name}, the first thing to do is to import it in your Ruby source file. EOS - output = render_embedded_string input, :attributes => {'compat-mode' => ''} + output = convert_string_to_embedded input, attributes: { 'compat-mode' => '' } assert_xpath '//*[@class="title"]/code[text()="asciidoctor"]', output, 1 - input = <<-EOS -:gem_name: asciidoctor + input = <<~'EOS' + :gem_name: asciidoctor -.Require the `{gem_name}` gem -To use {gem_name}, the first thing to do is to import it in your Ruby source file. + .Require the `{gem_name}` gem + To use {gem_name}, the first thing to do is to import it in your Ruby source file. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="title"]/code[text()="asciidoctor"]', output, 1 end - test 'renders attribute until it is deleted' do - input = <<-EOS -:foo: bar + test 'sets attribute until it is deleted' do + input = <<~'EOS' + :foo: bar -Crossing the {foo}. + Crossing the {foo}. -:foo!: + :foo!: -Belly up to the {foo}. + Belly up to the {foo}.
EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//p[text()="Crossing the bar."]', output, 1 assert_xpath '//p[text()="Belly up to the bar."]', output, 0 end test 'should allow compat-mode to be set and unset in middle of document' do - input = <<-EOS -:foo: bar + input = <<~'EOS' + :foo: bar -[[paragraph-a]] -`{foo}` + [[paragraph-a]] + `{foo}` -:compat-mode!: + :compat-mode!: -[[paragraph-b]] -`{foo}` + [[paragraph-b]] + `{foo}` -:compat-mode: + :compat-mode: -[[paragraph-c]] -`{foo}` + [[paragraph-c]] + `{foo}` EOS - result = render_embedded_string input, :attributes => {'compat-mode' => '@'} + result = convert_string_to_embedded input, attributes: { 'compat-mode' => '@' } assert_xpath '/*[@id="paragraph-a"]//code[text()="{foo}"]', result, 1 assert_xpath '/*[@id="paragraph-b"]//code[text()="bar"]', result, 1 assert_xpath '/*[@id="paragraph-c"]//code[text()="{foo}"]', result, 1 end test 'does not disturb attribute-looking things escaped with backslash' do - html = render_string(":foo: bar\nThis is a \\{foo} day.") - result = Nokogiri::HTML(html) - assert_equal 'This is a {foo} day.', result.css('p').first.content.strip + html = convert_string(":foo: bar\nThis is a \\{foo} day.") + assert_xpath '//p[text()="This is a {foo} day."]', html, 1 end test 'does not disturb attribute-looking things escaped with literals' do - html = render_string(":foo: bar\nThis is a +++{foo}+++ day.") - result = Nokogiri::HTML(html) - assert_equal 'This is a {foo} day.', result.css('p').first.content.strip + html = convert_string(":foo: bar\nThis is a +++{foo}+++ day.") + assert_xpath '//p[text()="This is a {foo} day."]', html, 1 end test 'does not substitute attributes inside listing blocks' do - input = <<-EOS -:forecast: snow + input = <<~'EOS' + :forecast: snow ----- -puts 'The forecast for today is {forecast}' ----- + ---- + puts 'The forecast for today is {forecast}' + ---- EOS - output = render_string(input) + output = convert_string(input) assert_match(/\{forecast\}/, output) end test 'does not substitute attributes inside literal blocks' do - input = <<-EOS -:foo: bar + input = <<~'EOS' + :foo: bar -.... -You insert the text {foo} to expand the value -of the attribute named foo in your document. -.... - EOS - output = render_string(input) + .... + You insert the text {foo} to expand the value + of the attribute named foo in your document. + .... 
+ EOS + output = convert_string(input) assert_match(/\{foo\}/, output) end test 'does not show docdir and shows relative docfile if safe mode is SERVER or greater' do - input = <<-EOS -* docdir: {docdir} -* docfile: {docfile} + input = <<~'EOS' + * docdir: {docdir} + * docfile: {docfile} EOS docdir = Dir.pwd - docfile = File.join(docdir, 'sample.asciidoc') - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docdir' => docdir, 'docfile' => docfile} + docfile = File.join(docdir, 'sample.adoc') + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => docdir, 'docfile' => docfile } assert_xpath '//li[1]/p[text()="docdir: "]', output, 1 - assert_xpath '//li[2]/p[text()="docfile: sample.asciidoc"]', output, 1 + assert_xpath '//li[2]/p[text()="docfile: sample.adoc"]', output, 1 end test 'shows absolute docdir and docfile paths if safe mode is less than SERVER' do - input = <<-EOS -* docdir: {docdir} -* docfile: {docfile} + input = <<~'EOS' + * docdir: {docdir} + * docfile: {docfile} EOS docdir = Dir.pwd - docfile = File.join(docdir, 'sample.asciidoc') - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => docdir, 'docfile' => docfile} + docfile = File.join(docdir, 'sample.adoc') + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => docdir, 'docfile' => docfile } assert_xpath %(//li[1]/p[text()="docdir: #{docdir}"]), output, 1 assert_xpath %(//li[2]/p[text()="docfile: #{docfile}"]), output, 1 end test 'assigns attribute defined in attribute reference with set prefix and value' do input = '{set:foo:bar}{foo}' - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p[text()="bar"]', output, 1 end test 'assigns attribute defined in attribute reference with set prefix and no value' do input = "{set:foo}\n{foo}yes" - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p[normalize-space(text())="yes"]', output, 1 end test 'assigns attribute defined in attribute reference with set prefix and empty value' do input = "{set:foo:}\n{foo}yes" - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p[normalize-space(text())="yes"]', output, 1 end test 'unassigns attribute defined in attribute reference with set prefix' do - input = <<-EOS -:attribute-missing: drop-line -:foo: + input = <<~'EOS' + :attribute-missing: drop-line + :foo: -{set:foo!} -{foo}yes + {set:foo!} + {foo}yes EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p/child::text()', output, 0 + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: foo' end end @@ -758,133 +982,153 @@ test "substitute intrinsics" do Asciidoctor::INTRINSIC_ATTRIBUTES.each_pair do |key, value| - html = render_string("Look, a {#{key}} is here") + html = convert_string("Look, a {#{key}} is here") # can't use Nokogiri because it interprets the HTML entities and we can't match them assert_match(/Look, a #{Regexp.escape(value)} is here/, html) end end test "don't escape intrinsic substitutions" do - html = render_string('happy{nbsp}together') + html = convert_string('happy{nbsp}together') assert_match(/happy together/, html) end test 
"escape special characters" do - html = render_string('&') + html = convert_string('&') assert_match(/<node>&<\/node>/, html) end test 'creates counter' do - input = <<-EOS -{counter:mycounter} - EOS + input = '{counter:mycounter}' doc = document_from_string input - output = doc.render + output = doc.convert assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 1 end test 'creates counter silently' do - input = <<-EOS -{counter2:mycounter} - EOS + input = '{counter2:mycounter}' doc = document_from_string input - output = doc.render + output = doc.convert assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 0 end test 'creates counter with numeric seed value' do - input = <<-EOS -{counter2:mycounter:10} - EOS + input = '{counter2:mycounter:10}' doc = document_from_string input - doc.render + doc.convert assert_equal 10, doc.attributes['mycounter'] end test 'creates counter with character seed value' do - input = <<-EOS -{counter2:mycounter:A} - EOS + input = '{counter2:mycounter:A}' doc = document_from_string input - doc.render + doc.convert assert_equal 'A', doc.attributes['mycounter'] end test 'increments counter with numeric value' do - input = <<-EOS -:mycounter: 1 + input = <<~'EOS' + :mycounter: 1 -{counter:mycounter} + {counter:mycounter} -{mycounter} + {mycounter} EOS doc = document_from_string input - output = doc.render + output = doc.convert assert_equal 2, doc.attributes['mycounter'] assert_xpath '//p[text()="2"]', output, 2 end test 'increments counter with character value' do - input = <<-EOS -:mycounter: @ + input = <<~'EOS' + :mycounter: @ -{counter:mycounter} + {counter:mycounter} -{mycounter} + {mycounter} EOS doc = document_from_string input - output = doc.render + output = doc.convert assert_equal 'A', doc.attributes['mycounter'] assert_xpath '//p[text()="A"]', output, 2 end test 'counter uses 0 as seed value if seed attribute is nil' do - input = <<-EOS -:mycounter: + input = <<~'EOS' + :mycounter: -{counter:mycounter} + {counter:mycounter} -{mycounter} + {mycounter} EOS doc = document_from_string input - output = doc.render :header_footer => false + output = doc.convert standalone: false assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 2 end test 'counter value can be reset by attribute entry' do - input = <<-EOS -:mycounter: + input = <<~'EOS' + :mycounter: -before: {counter:mycounter} {counter:mycounter} {counter:mycounter} + before: {counter:mycounter} {counter:mycounter} {counter:mycounter} -:mycounter!: + :mycounter!: -after: {counter:mycounter} + after: {counter:mycounter} EOS doc = document_from_string input - output = doc.render :header_footer => false + output = doc.convert standalone: false assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="before: 1 2 3"]', output, 1 assert_xpath '//p[text()="after: 1"]', output, 1 end + + test 'nested document should use counter from parent document' do + input = <<~'EOS' + .Title for Foo + image::foo.jpg[] + + [cols="2*a"] + |=== + | + .Title for Bar + image::bar.jpg[] + + | + .Title for Baz + image::baz.jpg[] + |=== + + .Title for Qux + image::qux.jpg[] + EOS + + output = convert_string_to_embedded input + assert_xpath '//div[@class="title"]', output, 4 + assert_xpath '//div[@class="title"][text() = "Figure 1. Title for Foo"]', output, 1 + assert_xpath '//div[@class="title"][text() = "Figure 2. Title for Bar"]', output, 1 + assert_xpath '//div[@class="title"][text() = "Figure 3. 
Title for Baz"]', output, 1 + assert_xpath '//div[@class="title"][text() = "Figure 4. Title for Qux"]', output, 1 + end end context 'Block attributes' do test 'parses attribute names as name token' do - input = <<-EOS -[normal,foo="bar",_foo="_bar",foo1="bar1",foo-foo="bar-bar",foo.foo="bar.bar"] -content + input = <<~'EOS' + [normal,foo="bar",_foo="_bar",foo1="bar1",foo-foo="bar-bar",foo.foo="bar.bar"] + content EOS block = block_from_string input @@ -896,11 +1140,11 @@ end test 'positional attributes assigned to block' do - input = <<-EOS -[quote, author, source] -____ -A famous quote. -____ + input = <<~'EOS' + [quote, author, source] + ____ + A famous quote. + ____ EOS doc = document_from_string(input) qb = doc.blocks.first @@ -912,11 +1156,11 @@ end test 'normal substitutions are performed on single-quoted positional attribute' do - input = <<-EOS -[quote, author, 'http://wikipedia.org[source]'] -____ -A famous quote. -____ + input = <<~'EOS' + [quote, author, 'http://wikipedia.org[source]'] + ____ + A famous quote. + ____ EOS doc = document_from_string(input) qb = doc.blocks.first @@ -928,11 +1172,11 @@ end test 'normal substitutions are performed on single-quoted named attribute' do - input = <<-EOS -[quote, author, citetitle='http://wikipedia.org[source]'] -____ -A famous quote. -____ + input = <<~'EOS' + [quote, author, citetitle='http://wikipedia.org[source]'] + ____ + A famous quote. + ____ EOS doc = document_from_string(input) qb = doc.blocks.first @@ -944,33 +1188,33 @@ end test 'normal substitutions are performed once on single-quoted named title attribute' do - input = <<-EOS -[title='*title*'] -content + input = <<~'EOS' + [title='*title*'] + content EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="title"]/strong[text()="title"]', output, 1 end - test 'attribute list may begin with space' do - input = <<-EOS -[ quote] -____ -A famous quote. -____ + test 'attribute list may not begin with space' do + input = <<~'EOS' + [ quote] + ____ + A famous quote. + ____ EOS doc = document_from_string input - qb = doc.blocks.first - assert_equal 'quote', qb.style + b1 = doc.blocks.first + assert_equal ['[ quote]'], b1.lines end test 'attribute list may begin with comma' do - input = <<-EOS -[, author, source] -____ -A famous quote. -____ + input = <<~'EOS' + [, author, source] + ____ + A famous quote. + ____ EOS doc = document_from_string input @@ -981,11 +1225,11 @@ end test 'first attribute in list may be double quoted' do - input = <<-EOS -["quote", "author", "source", role="famous"] -____ -A famous quote. -____ + input = <<~'EOS' + ["quote", "author", "source", role="famous"] + ____ + A famous quote. + ____ EOS doc = document_from_string input @@ -997,11 +1241,11 @@ end test 'first attribute in list may be single quoted' do - input = <<-EOS -['quote', 'author', 'source', role='famous'] -____ -A famous quote. -____ + input = <<~'EOS' + ['quote', 'author', 'source', role='famous'] + ____ + A famous quote. + ____ EOS doc = document_from_string input @@ -1013,20 +1257,20 @@ end test 'attribute with value None without quotes is ignored' do - input = <<-EOS -[id=None] -paragraph + input = <<~'EOS' + [id=None] + paragraph EOS doc = document_from_string input para = doc.blocks.first - assert !para.attributes.has_key?('id') + refute para.attributes.key?('id') end test 'role? 
returns true if role is assigned' do - input = <<-EOS -[role="lead"] -A paragraph + input = <<~'EOS' + [role="lead"] + A paragraph EOS doc = document_from_string input @@ -1034,35 +1278,59 @@ assert p.role? end + test 'role? does not return true if role attribute is set on document' do + input = <<~'EOS' + :role: lead + + A paragraph + EOS + + doc = document_from_string input + p = doc.blocks.first + refute p.role? + end + test 'role? can check for exact role name match' do - input = <<-EOS -[role="lead"] -A paragraph + input = <<~'EOS' + [role="lead"] + A paragraph EOS doc = document_from_string input p = doc.blocks.first assert p.role?('lead') p2 = doc.blocks.last - assert !p2.role?('final') + refute p2.role?('final') end test 'has_role? can check for precense of role name' do - input = <<-EOS -[role="lead abstract"] -A paragraph + input = <<~'EOS' + [role="lead abstract"] + A paragraph EOS doc = document_from_string input p = doc.blocks.first - assert !p.role?('lead') + refute p.role?('lead') assert p.has_role?('lead') end + test 'has_role? does not look for role defined as document attribute' do + input = <<~'EOS' + :role: lead abstract + + A paragraph + EOS + + doc = document_from_string input + p = doc.blocks.first + refute p.has_role?('lead') + end + test 'roles returns array of role names' do - input = <<-EOS -[role="story lead"] -A paragraph + input = <<~'EOS' + [role="story lead"] + A paragraph EOS doc = document_from_string input @@ -1071,8 +1339,18 @@ end test 'roles returns empty array if role attribute is not set' do - input = <<-EOS -A paragraph + input = 'a paragraph' + + doc = document_from_string input + p = doc.blocks.first + assert_equal [], p.roles + end + + test 'roles does not return value of roles document attribute' do + input = <<~'EOS' + :role: story lead + + A paragraph EOS doc = document_from_string input @@ -1081,11 +1359,11 @@ end test "Attribute substitutions are performed on attribute list before parsing attributes" do - input = <<-EOS -:lead: role="lead" + input = <<~'EOS' + :lead: role="lead" -[{lead}] -A paragraph + [{lead}] + A paragraph EOS doc = document_from_string(input) para = doc.blocks.first @@ -1093,152 +1371,237 @@ end test 'id, role and options attributes can be specified on block style using shorthand syntax' do - input = <<-EOS -[normal#first.lead%step] -A normal paragraph. + input = <<~'EOS' + [literal#first.lead%step] + A literal paragraph. + EOS + doc = document_from_string(input) + para = doc.blocks.first + assert_equal :literal, para.context + assert_equal 'first', para.attributes['id'] + assert_equal 'lead', para.attributes['role'] + assert para.attributes.key?('step-option') + refute para.attributes.key?('options') + end + + test 'id, role and options attributes can be specified using shorthand syntax on block style using multiple block attribute lines' do + input = <<~'EOS' + [literal] + [#first] + [.lead] + [%step] + A literal paragraph. 
EOS doc = document_from_string(input) para = doc.blocks.first + assert_equal :literal, para.context assert_equal 'first', para.attributes['id'] assert_equal 'lead', para.attributes['role'] - assert_equal 'step', para.attributes['options'] - assert para.attributes.has_key?('step-option') + assert para.attributes.key?('step-option') + refute para.attributes.key?('options') end test 'multiple roles and options can be specified in block style using shorthand syntax' do - input = <<-EOS -[.role1%option1.role2%option2] -Text + input = <<~'EOS' + [.role1%option1.role2%option2] + Text EOS doc = document_from_string input para = doc.blocks.first assert_equal 'role1 role2', para.attributes['role'] - assert_equal 'option1,option2', para.attributes['options'] - assert para.attributes.has_key?('option1-option') - assert para.attributes.has_key?('option2-option') + assert para.attributes.key?('option1-option') + assert para.attributes.key?('option2-option') + refute para.attributes.key?('options') + end + + test 'options specified using shorthand syntax on block style across multiple lines should be additive' do + input = <<~'EOS' + [%option1] + [%option2] + Text + EOS + + doc = document_from_string input + para = doc.blocks.first + assert para.attributes.key?('option1-option') + assert para.attributes.key?('option2-option') + refute para.attributes.key?('options') + end + + test 'roles specified using shorthand syntax on block style across multiple lines should be additive' do + input = <<~'EOS' + [.role1] + [.role2.role3] + Text + EOS + + doc = document_from_string input + para = doc.blocks.first + assert_equal 'role1 role2 role3', para.attributes['role'] + end + + test 'setting a role using the role attribute replaces any existing roles' do + input = <<~'EOS' + [.role1] + [role=role2] + [.role3] + Text + EOS + + doc = document_from_string input + para = doc.blocks.first + assert_equal 'role2 role3', para.attributes['role'] + end + + test 'setting a role using the shorthand syntax on block style should not clear the ID' do + input = <<~'EOS' + [#id] + [.role] + Text + EOS + + doc = document_from_string input + para = doc.blocks.first + assert_equal 'id', para.id + assert_equal 'role', para.role end test 'a role can be added using add_role when the node has no roles' do - input = <<-EOS -A normal paragraph - EOS + input = 'A normal paragraph' doc = document_from_string(input) para = doc.blocks.first - para.add_role 'role1' + res = para.add_role 'role1' + assert res assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' end test 'a role can be added using add_role when the node already has a role' do - input = <<-EOS -[.role1] -A normal paragraph - EOS + input = <<~'EOS' + [.role1] + A normal paragraph + EOS doc = document_from_string(input) para = doc.blocks.first - para.add_role 'role2' + res = para.add_role 'role2' + assert res assert_equal 'role1 role2', para.attributes['role'] assert para.has_role? 'role1' assert para.has_role? 'role2' end test 'a role is not added using add_role if the node already has that role' do - input = <<-EOS -[.role1] -A normal paragraph - EOS + input = <<~'EOS' + [.role1] + A normal paragraph + EOS doc = document_from_string(input) para = doc.blocks.first - para.add_role 'role1' + res = para.add_role 'role1' + refute res assert_equal 'role1', para.attributes['role'] assert para.has_role? 
'role1' end test 'an existing role can be removed using remove_role' do - input = <<-EOS -[.role1.role2] -A normal paragraph - EOS + input = <<~'EOS' + [.role1.role2] + A normal paragraph + EOS doc = document_from_string(input) para = doc.blocks.first - para.remove_role 'role1' + res = para.remove_role 'role1' + assert res assert_equal 'role2', para.attributes['role'] assert para.has_role? 'role2' - assert !para.has_role?('role1') + refute para.has_role?('role1') + end + + test 'roles are removed when last role is removed using remove_role' do + input = <<~'EOS' + [.role1] + A normal paragraph + EOS + doc = document_from_string(input) + para = doc.blocks.first + res = para.remove_role 'role1' + assert res + refute para.role? + assert_nil para.attributes['role'] + refute para.has_role? 'role1' end test 'roles are not changed when a non-existent role is removed using remove_role' do - input = <<-EOS -[.role1] -A normal paragraph - EOS + input = <<~'EOS' + [.role1] + A normal paragraph + EOS doc = document_from_string(input) para = doc.blocks.first - para.remove_role 'role2' + res = para.remove_role 'role2' + refute res assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' - assert !para.has_role?('role2') + refute para.has_role?('role2') end test 'roles are not changed when using remove_role if the node has no roles' do - input = <<-EOS -A normal paragraph - EOS + input = 'A normal paragraph' doc = document_from_string(input) para = doc.blocks.first - para.remove_role 'role1' - assert_equal nil, para.attributes['role'] - assert !para.has_role?('role1') + res = para.remove_role 'role1' + refute res + assert_nil para.attributes['role'] + refute para.has_role?('role1') end test 'option can be specified in first position of block style using shorthand syntax' do - input = <<-EOS -[%interactive] -- [x] checked + input = <<~'EOS' + [%interactive] + - [x] checked EOS doc = document_from_string input list = doc.blocks.first - assert_equal 'interactive', list.attributes['options'] - assert list.attributes.has_key?('interactive-option') - assert list.attributes[1] == '%interactive' + assert list.attributes.key? 'interactive-option' + refute list.attributes.key? 'options' end test 'id and role attributes can be specified on section style using shorthand syntax' do - input = <<-EOS -[dedication#dedication.small] -== Section -Content. + input = <<~'EOS' + [dedication#dedication.small] + == Section + Content. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/div[@class="sect1 small"]', output, 1 assert_xpath '/div[@class="sect1 small"]/h2[@id="dedication"]', output, 1 end test 'id attribute specified using shorthand syntax should not create a special section' do - input = <<-EOS -[#idname] -== Section + input = <<~'EOS' + [#idname] + == Section -content + content EOS - doc = document_from_string input, :backend => 'docbook45' + doc = document_from_string input, backend: 'docbook' section = doc.blocks[0] refute_nil section assert_equal :section, section.context - assert !section.special + refute section.special output = doc.convert - assert_css 'section', output, 1 - assert_css 'section#idname', output, 1 + assert_css 'article:root > section', output, 1 + assert_css 'article:root > section[xml|id="idname"]', output, 1 end test "Block attributes are additive" do - input = <<-EOS -[id='foo'] -[role='lead'] -A paragraph. + input = <<~'EOS' + [id='foo'] + [role='lead'] + A paragraph. 
EOS doc = document_from_string(input) para = doc.blocks.first @@ -1247,16 +1610,16 @@ end test "Last wins for id attribute" do - input = <<-EOS -[[bar]] -[[foo]] -== Section - -paragraph - -[[baz]] -[id='coolio'] -=== Section + input = <<~'EOS' + [[bar]] + [[foo]] + == Section + + paragraph + + [[baz]] + [id='coolio'] + === Section EOS doc = document_from_string(input) sec = doc.first_section @@ -1265,30 +1628,30 @@ assert_equal 'coolio', subsec.id end - test "trailing block attributes tranfer to the following section" do - input = <<-EOS -[[one]] + test "trailing block attributes transfer to the following section" do + input = <<~'EOS' + [[one]] -== Section One + == Section One -paragraph + paragraph -[[sub]] -// try to mess this up! + [[sub]] + // try to mess this up! -=== Sub-section + === Sub-section -paragraph + paragraph -[role='classy'] + [role='classy'] -//// -block comment -//// + //// + block comment + //// -== Section Two + == Section Two -content + content EOS doc = document_from_string(input) section_one = doc.blocks.first diff -Nru asciidoctor-1.5.5/test/blocks_test.rb asciidoctor-2.0.10/test/blocks_test.rb --- asciidoctor-1.5.5/test/blocks_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/blocks_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,26 +1,67 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' + +context 'Blocks' do + default_logger = Asciidoctor::LoggerManager.logger + + setup do + Asciidoctor::LoggerManager.logger = (@logger = Asciidoctor::MemoryLogger.new) + end -context "Blocks" do - context 'Line Breaks' do - test "ruler" do - output = render_string("'''") - assert_xpath '//*[@id="content"]/hr', output, 1 - assert_xpath '//*[@id="content"]/*', output, 1 - end - - test "ruler between blocks" do - output = render_string("Block above\n\n'''\n\nBlock below") - assert_xpath '//*[@id="content"]/hr', output, 1 - assert_xpath '//*[@id="content"]/hr/preceding-sibling::*', output, 1 - assert_xpath '//*[@id="content"]/hr/following-sibling::*', output, 1 + teardown do + Asciidoctor::LoggerManager.logger = default_logger + end + + context 'Layout Breaks' do + test 'horizontal rule' do + %w(''' '''' '''''').each do |line| + output = convert_string_to_embedded line + assert_includes output, '
<hr>'
+      end
+    end
-    test "page break" do
-      output = render_embedded_string("page 1\n\n<<<\n\npage 2")
+    test 'horizontal rule with markdown syntax disabled' do
+      old_markdown_syntax = Asciidoctor::Compliance.markdown_syntax
+      begin
+        Asciidoctor::Compliance.markdown_syntax = false
+        %w(''' '''' '''''').each do |line|
+          output = convert_string_to_embedded line
+          assert_includes output, '<hr>'
+        end
+        %w(--- *** ___).each do |line|
+          output = convert_string_to_embedded line
+          refute_includes output, '<hr>'
+        end
+      ensure
+        Asciidoctor::Compliance.markdown_syntax = old_markdown_syntax
+      end
+    end
+
+    test '< 3 chars does not make horizontal rule' do
+      %w(' '').each do |line|
+        output = convert_string_to_embedded line
+        refute_includes output, '<hr>'
+        assert_includes output, %(<div class="paragraph">\n<p>#{line}</p>\n</div>)
+      end
+    end
+
+    test 'mixed chars does not make horizontal rule' do
+      [%q(''<), %q('''<), %q(' ' ')].each do |line|
+        output = convert_string_to_embedded line
+        refute_includes output, '<hr>'
+        assert_includes output, %(<div class="paragraph">\n<p>#{line.sub '<', '&lt;'}</p>\n</div>
    ) + end + end + + test 'horizontal rule between blocks' do + output = convert_string_to_embedded %(Block above\n\n'''\n\nBlock below) + assert_xpath '/hr', output, 1 + assert_xpath '/hr/preceding-sibling::*', output, 1 + assert_xpath '/hr/following-sibling::*', output, 1 + end + + test 'page break' do + output = convert_string_to_embedded %(page 1\n\n<<<\n\npage 2) assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]', output, 1 assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]/preceding-sibling::div/p[text()="page 1"]', output, 1 assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]/following-sibling::div/p[text()="page 2"]', output, 1 @@ -29,437 +70,623 @@ context 'Comments' do test 'line comment between paragraphs offset by blank lines' do - input = <<-EOS -first paragraph + input = <<~'EOS' + first paragraph -// line comment + // line comment -second paragraph + second paragraph EOS - output = render_embedded_string input + output = convert_string_to_embedded input refute_match(/line comment/, output) assert_xpath '//p', output, 2 end test 'adjacent line comment between paragraphs' do - input = <<-EOS -first line -// line comment -second line + input = <<~'EOS' + first line + // line comment + second line EOS - output = render_embedded_string input + output = convert_string_to_embedded input refute_match(/line comment/, output) assert_xpath '//p', output, 1 assert_xpath "//p[1][text()='first line\nsecond line']", output, 1 end test 'comment block between paragraphs offset by blank lines' do - input = <<-EOS -first paragraph + input = <<~'EOS' + first paragraph + + //// + block comment + //// + + second paragraph + EOS + output = convert_string_to_embedded input + refute_match(/block comment/, output) + assert_xpath '//p', output, 2 + end -//// -block comment -//// + test 'comment block between paragraphs offset by blank lines inside delimited block' do + input = <<~'EOS' + ==== + first paragraph + + //// + block comment + //// -second paragraph + second paragraph + ==== EOS - output = render_embedded_string input + output = convert_string_to_embedded input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end test 'adjacent comment block between paragraphs' do - input = <<-EOS -first paragraph -//// -block comment -//// -second paragraph + input = <<~'EOS' + first paragraph + //// + block comment + //// + second paragraph EOS - output = render_embedded_string input + output = convert_string_to_embedded input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end - test "can render with block comment at end of document with trailing endlines" do - input = <<-EOS -paragraph + test "can convert with block comment at end of document with trailing newlines" do + input = <<~'EOS' + paragraph -//// -block comment -//// + //// + block comment + //// EOS - output = render_embedded_string input + output = convert_string_to_embedded input refute_match(/block comment/, output) end - test "trailing endlines after block comment at end of document does not create paragraph" do - input = <<-EOS -paragraph - -//// -block comment -//// + test "trailing newlines after block comment at end of document does not create paragraph" do + input = <<~'EOS' + paragraph + + //// + block comment + //// EOS d = document_from_string input assert_equal 1, d.blocks.size - assert_xpath '//p', d.render, 1 + assert_xpath '//p', d.convert, 1 end test 'line starting with three slashes should not be line comment' do - input = 
<<-EOS -/// not a line comment - EOS - - output = render_embedded_string input - assert !output.strip.empty?, "Line should be emitted => #{input.rstrip}" + input = '/// not a line comment' + output = convert_string_to_embedded input + refute_empty output.strip, "Line should be emitted => #{input.rstrip}" end test 'preprocessor directives should not be processed within comment block within block metadata' do - input = <<-EOS -.sample title -//// -ifdef::asciidoctor[////] -//// -line should be rendered + input = <<~'EOS' + .sample title + //// + ifdef::asciidoctor[////] + //// + line should be shown EOS - output = render_embedded_string input - assert_xpath '//p[text() = "line should be rendered"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '//p[text()="line should be shown"]', output, 1 end test 'preprocessor directives should not be processed within comment block' do - input = <<-EOS -dummy line + input = <<~'EOS' + dummy line + + //// + ifdef::asciidoctor[////] + //// + + line should be shown + EOS -//// -ifdef::asciidoctor[////] -//// + output = convert_string_to_embedded input + assert_xpath '//p[text()="line should be shown"]', output, 1 + end + + test 'should warn if unterminated comment block is detected in body' do + input = <<~'EOS' + before comment block -line should be rendered + //// + content that has been disabled + + supposed to be after comment block, except it got swallowed by block comment EOS - output = render_embedded_string input - assert_xpath '//p[text() = "line should be rendered"]', output, 1 + convert_string_to_embedded input + assert_message @logger, :WARN, ': line 3: unterminated comment block', Hash + end + + test 'should warn if unterminated comment block is detected inside another block' do + input = <<~'EOS' + before sidebar block + + **** + //// + content that has been disabled + **** + + supposed to be after sidebar block, except it got swallowed by block comment + EOS + + convert_string_to_embedded input + assert_message @logger, :WARN, ': line 4: unterminated comment block', Hash end # WARNING if first line of content is a directive, it will get interpretted before we know it's a comment block # it happens because we always look a line ahead...not sure what we can do about it test 'preprocessor directives should not be processed within comment open block' do - input = <<-EOS -[comment] --- -first line of comment -ifdef::asciidoctor[--] -line should not be rendered --- + input = <<~'EOS' + [comment] + -- + first line of comment + ifdef::asciidoctor[--] + line should not be shown + -- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//p', output, 0 end - # WARNING if first line of content is a directive, it will get interpretted before we know it's a comment block - # it happens because we always look a line ahead...not sure what we can do about it - test 'preprocessor directives should not be processed within comment paragraph' do - input = <<-EOS -[comment] -first line of content -ifdef::asciidoctor[////] + # WARNING this assertion fails if the directive is the first line of the paragraph instead of the second + # it happens because we always look a line ahead; not sure what we can do about it + test 'preprocessor directives should not be processed on subsequent lines of a comment paragraph' do + input = <<~'EOS' + [comment] + first line of content + ifdef::asciidoctor[////] -this line should be rendered + this line should be shown EOS - output = render_embedded_string input - 
assert_xpath '//p[text() = "this line should be rendered"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '//p[text()="this line should be shown"]', output, 1 end test 'comment style on open block should only skip block' do - input = <<-EOS -[comment] --- -skip + input = <<~'EOS' + [comment] + -- + skip -this block --- + this block + -- -not this text + not this text EOS - result = render_embedded_string input + result = convert_string_to_embedded input assert_xpath '//p', result, 1 assert_xpath '//p[text()="not this text"]', result, 1 end test 'comment style on paragraph should only skip paragraph' do - input = <<-EOS -[comment] -skip -this paragraph + input = <<~'EOS' + [comment] + skip + this paragraph -not this text + not this text EOS - result = render_embedded_string input + result = convert_string_to_embedded input assert_xpath '//p', result, 1 assert_xpath '//p[text()="not this text"]', result, 1 end test 'comment style on paragraph should not cause adjacent block to be skipped' do - input = <<-EOS -[comment] -skip -this paragraph -[example] -not this text + input = <<~'EOS' + [comment] + skip + this paragraph + [example] + not this text EOS - result = render_embedded_string input + result = convert_string_to_embedded input assert_xpath '/*[@class="exampleblock"]', result, 1 assert_xpath '/*[@class="exampleblock"]//*[normalize-space(text())="not this text"]', result, 1 end + + # NOTE this test verifies the nil return value of Parser#next_block + test 'should not drop content that follows skipped content inside a delimited block' do + input = <<~'EOS' + ==== + paragraph + + [comment#idname] + skip + + paragraph + ==== + EOS + result = convert_string_to_embedded input + assert_xpath '/*[@class="exampleblock"]', result, 1 + assert_xpath '/*[@class="exampleblock"]//*[@class="paragraph"]', result, 2 + assert_xpath '//*[@class="paragraph"][@id="idname"]', result, 0 + end + end + + context 'Sidebar Blocks' do + test 'should parse sidebar block' do + input = <<~'EOS' + == Section + + .Sidebar + **** + Content goes here + **** + EOS + result = convert_string input + assert_xpath "//*[@class='sidebarblock']//p", result, 1 + end end context 'Quote and Verse Blocks' do test 'quote block with no attribution' do - input = <<-EOS -____ -A famous quote. -____ + input = <<~'EOS' + ____ + A famous quote. + ____ EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 - assert_xpath '//*[@class = "quoteblock"]//p[text() = "A famous quote."]', output, 1 + assert_xpath '//*[@class="quoteblock"]//p[text()="A famous quote."]', output, 1 end test 'quote block with attribution' do - input = <<-EOS -[quote, Famous Person, Famous Book (1999)] -____ -A famous quote. -____ + input = <<~'EOS' + [quote, Famous Person, Famous Book (1999)] + ____ + A famous quote. 
+ ____ EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 - assert_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]/cite[text() = "Famous Book (1999)"]', output, 1 - attribution = xmlnodes_at_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]', output, 1 + assert_xpath '//*[@class="quoteblock"]/*[@class="attribution"]/cite[text()="Famous Book (1999)"]', output, 1 + attribution = xmlnodes_at_xpath '//*[@class="quoteblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first - assert_equal "#{expand_entity 8212} Famous Person", author.text.strip + assert_equal "#{decode_char 8212} Famous Person", author.text.strip end test 'quote block with attribute and id and role shorthand' do - input = <<-EOS -[quote#think.big, Donald Trump] -____ -As long as your going to be thinking anyway, think big. -____ + input = <<~'EOS' + [quote#justice-to-all.solidarity, Martin Luther King, Jr.] + ____ + Injustice anywhere is a threat to justice everywhere. + ____ + EOS + + output = convert_string_to_embedded input + assert_css '.quoteblock', output, 1 + assert_css '#justice-to-all.quoteblock.solidarity', output, 1 + assert_css '.quoteblock > .attribution', output, 1 + end + + test 'setting ID using style shorthand should not reset block style' do + input = <<~'EOS' + [quote] + [#justice-to-all.solidarity, Martin Luther King, Jr.] + ____ + Injustice anywhere is a threat to justice everywhere. + ____ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.quoteblock', output, 1 - assert_css '#think.quoteblock.big', output, 1 + assert_css '#justice-to-all.quoteblock.solidarity', output, 1 assert_css '.quoteblock > .attribution', output, 1 end test 'quote block with complex content' do - input = <<-EOS -____ -A famous quote. + input = <<~'EOS' + ____ + A famous quote. -NOTE: _That_ was inspiring. -____ + NOTE: _That_ was inspiring. + ____ EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph', output, 1 assert_css '.quoteblock > blockquote > .paragraph + .admonitionblock', output, 1 end - test 'quote block using air quotes with no attribution' do - input = <<-EOS -"" -A famous quote. -"" - EOS - output = render_string input - assert_css '.quoteblock', output, 1 - assert_css '.quoteblock > blockquote', output, 1 - assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 - assert_css '.quoteblock > .attribution', output, 0 - assert_xpath '//*[@class = "quoteblock"]//p[text() = "A famous quote."]', output, 1 + test 'quote block with attribution converted to DocBook' do + input = <<~'EOS' + [quote, Famous Person, Famous Book (1999)] + ____ + A famous quote. 
+ ____ + EOS + output = convert_string input, backend: :docbook + assert_css 'blockquote', output, 1 + assert_css 'blockquote > simpara', output, 1 + assert_css 'blockquote > attribution', output, 1 + assert_css 'blockquote > attribution > citetitle', output, 1 + assert_xpath '//blockquote/attribution/citetitle[text()="Famous Book (1999)"]', output, 1 + attribution = xmlnodes_at_xpath '//blockquote/attribution', output, 1 + author = attribution.children.first + assert_equal 'Famous Person', author.text.strip + end + + test 'epigraph quote block with attribution converted to DocBook' do + input = <<~'EOS' + [.epigraph, Famous Person, Famous Book (1999)] + ____ + A famous quote. + ____ + EOS + output = convert_string input, backend: :docbook + assert_css 'epigraph', output, 1 + assert_css 'epigraph > simpara', output, 1 + assert_css 'epigraph > attribution', output, 1 + assert_css 'epigraph > attribution > citetitle', output, 1 + assert_xpath '//epigraph/attribution/citetitle[text()="Famous Book (1999)"]', output, 1 + attribution = xmlnodes_at_xpath '//epigraph/attribution', output, 1 + author = attribution.children.first + assert_equal 'Famous Person', author.text.strip end test 'markdown-style quote block with single paragraph and no attribution' do - input = <<-EOS -> A famous quote. -> Some more inspiring words. + input = <<~'EOS' + > A famous quote. + > Some more inspiring words. EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 - assert_xpath %(//*[@class = "quoteblock"]//p[text() = "A famous quote.\nSome more inspiring words."]), output, 1 + assert_xpath %(//*[@class="quoteblock"]//p[text()="A famous quote.\nSome more inspiring words."]), output, 1 end test 'lazy markdown-style quote block with single paragraph and no attribution' do - input = <<-EOS -> A famous quote. -Some more inspiring words. + input = <<~'EOS' + > A famous quote. + Some more inspiring words. EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 - assert_xpath %(//*[@class = "quoteblock"]//p[text() = "A famous quote.\nSome more inspiring words."]), output, 1 + assert_xpath %(//*[@class="quoteblock"]//p[text()="A famous quote.\nSome more inspiring words."]), output, 1 end test 'markdown-style quote block with multiple paragraphs and no attribution' do - input = <<-EOS -> A famous quote. -> -> Some more inspiring words. + input = <<~'EOS' + > A famous quote. + > + > Some more inspiring words. 
EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 2 assert_css '.quoteblock > .attribution', output, 0 - assert_xpath %((//*[@class = "quoteblock"]//p)[1][text() = "A famous quote."]), output, 1 - assert_xpath %((//*[@class = "quoteblock"]//p)[2][text() = "Some more inspiring words."]), output, 1 + assert_xpath %((//*[@class="quoteblock"]//p)[1][text()="A famous quote."]), output, 1 + assert_xpath %((//*[@class="quoteblock"]//p)[2][text()="Some more inspiring words."]), output, 1 end test 'markdown-style quote block with multiple blocks and no attribution' do - input = <<-EOS -> A famous quote. -> -> NOTE: Some more inspiring words. + input = <<~'EOS' + > A famous quote. + > + > NOTE: Some more inspiring words. EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > blockquote > .admonitionblock', output, 1 assert_css '.quoteblock > .attribution', output, 0 - assert_xpath %((//*[@class = "quoteblock"]//p)[1][text() = "A famous quote."]), output, 1 - assert_xpath %((//*[@class = "quoteblock"]//*[@class = "admonitionblock note"]//*[@class="content"])[1][normalize-space(text()) = "Some more inspiring words."]), output, 1 + assert_xpath %((//*[@class="quoteblock"]//p)[1][text()="A famous quote."]), output, 1 + assert_xpath %((//*[@class="quoteblock"]//*[@class="admonitionblock note"]//*[@class="content"])[1][normalize-space(text())="Some more inspiring words."]), output, 1 end test 'markdown-style quote block with single paragraph and attribution' do - input = <<-EOS -> A famous quote. -> Some more inspiring words. -> -- Famous Person, Famous Source, Volume 1 (1999) + input = <<~'EOS' + > A famous quote. + > Some more inspiring words. 
+ > -- Famous Person, Famous Source, Volume 1 (1999) EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 - assert_xpath %(//*[@class = "quoteblock"]//p[text() = "A famous quote.\nSome more inspiring words."]), output, 1 + assert_xpath %(//*[@class="quoteblock"]//p[text()="A famous quote.\nSome more inspiring words."]), output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 - assert_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]/cite[text() = "Famous Source, Volume 1 (1999)"]', output, 1 - attribution = xmlnodes_at_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]', output, 1 + assert_xpath '//*[@class="quoteblock"]/*[@class="attribution"]/cite[text()="Famous Source, Volume 1 (1999)"]', output, 1 + attribution = xmlnodes_at_xpath '//*[@class="quoteblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first - assert_equal "#{expand_entity 8212} Famous Person", author.text.strip + assert_equal "#{decode_char 8212} Famous Person", author.text.strip + end + + test 'markdown-style quote block with only attribution' do + input = '> -- Anonymous' + output = convert_string input + assert_css '.quoteblock', output, 1 + assert_css '.quoteblock > blockquote', output, 1 + assert_css '.quoteblock > blockquote > *', output, 0 + assert_css '.quoteblock > .attribution', output, 1 + assert_xpath %(//*[@class="quoteblock"]//*[@class="attribution"][contains(text(),"Anonymous")]), output, 1 + end + + test 'should parse credit line in markdown-style quote block like positional block attributes' do + input = <<~'EOS' + > I hold it that a little rebellion now and then is a good thing, + > and as necessary in the political world as storms in the physical. + -- Thomas Jefferson, https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1[The Papers of Thomas Jefferson, Volume 11] + EOS + + output = convert_string_to_embedded input + assert_css '.quoteblock', output, 1 + assert_css '.quoteblock cite a[href="https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1"]', output, 1 end test 'quoted paragraph-style quote block with attribution' do - input = <<-EOS -"A famous quote. -Some more inspiring words." --- Famous Person, Famous Source, Volume 1 (1999) + input = <<~'EOS' + "A famous quote. + Some more inspiring words." + -- Famous Person, Famous Source, Volume 1 (1999) EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 - assert_xpath %(//*[@class = "quoteblock"]/blockquote[normalize-space(text()) = "A famous quote. Some more inspiring words."]), output, 1 + assert_xpath %(//*[@class="quoteblock"]/blockquote[normalize-space(text())="A famous quote. 
Some more inspiring words."]), output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 - assert_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]/cite[text() = "Famous Source, Volume 1 (1999)"]', output, 1 - attribution = xmlnodes_at_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]', output, 1 + assert_xpath '//*[@class="quoteblock"]/*[@class="attribution"]/cite[text()="Famous Source, Volume 1 (1999)"]', output, 1 + attribution = xmlnodes_at_xpath '//*[@class="quoteblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first - assert_equal "#{expand_entity 8212} Famous Person", author.text.strip + assert_equal "#{decode_char 8212} Famous Person", author.text.strip + end + + test 'should parse credit line in quoted paragraph-style quote block like positional block attributes' do + input = <<~'EOS' + "I hold it that a little rebellion now and then is a good thing, + and as necessary in the political world as storms in the physical." + -- Thomas Jefferson, https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1[The Papers of Thomas Jefferson, Volume 11] + EOS + + output = convert_string_to_embedded input + assert_css '.quoteblock', output, 1 + assert_css '.quoteblock cite a[href="https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1"]', output, 1 end test 'single-line verse block without attribution' do - input = <<-EOS -[verse] -____ -A famous verse. -____ + input = <<~'EOS' + [verse] + ____ + A famous verse. + ____ EOS - output = render_string input + output = convert_string input assert_css '.verseblock', output, 1 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock > .attribution', output, 0 assert_css '.verseblock p', output, 0 - assert_xpath '//*[@class = "verseblock"]/pre[normalize-space(text()) = "A famous verse."]', output, 1 + assert_xpath '//*[@class="verseblock"]/pre[normalize-space(text())="A famous verse."]', output, 1 end test 'single-line verse block with attribution' do - input = <<-EOS -[verse, Famous Poet, Famous Poem] -____ -A famous verse. -____ + input = <<~'EOS' + [verse, Famous Poet, Famous Poem] + ____ + A famous verse. + ____ EOS - output = render_string input + output = convert_string input assert_css '.verseblock', output, 1 assert_css '.verseblock p', output, 0 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock > .attribution', output, 1 assert_css '.verseblock > .attribution > cite', output, 1 assert_css '.verseblock > .attribution > br + cite', output, 1 - assert_xpath '//*[@class = "verseblock"]/*[@class = "attribution"]/cite[text() = "Famous Poem"]', output, 1 - attribution = xmlnodes_at_xpath '//*[@class = "verseblock"]/*[@class = "attribution"]', output, 1 + assert_xpath '//*[@class="verseblock"]/*[@class="attribution"]/cite[text()="Famous Poem"]', output, 1 + attribution = xmlnodes_at_xpath '//*[@class="verseblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first - assert_equal "#{expand_entity 8212} Famous Poet", author.text.strip + assert_equal "#{decode_char 8212} Famous Poet", author.text.strip + end + + test 'single-line verse block with attribution converted to DocBook' do + input = <<~'EOS' + [verse, Famous Poet, Famous Poem] + ____ + A famous verse. 
+ ____ + EOS + output = convert_string input, backend: :docbook + assert_css 'blockquote', output, 1 + assert_css 'blockquote simpara', output, 0 + assert_css 'blockquote > literallayout', output, 1 + assert_css 'blockquote > attribution', output, 1 + assert_css 'blockquote > attribution > citetitle', output, 1 + assert_xpath '//blockquote/attribution/citetitle[text()="Famous Poem"]', output, 1 + attribution = xmlnodes_at_xpath '//blockquote/attribution', output, 1 + author = attribution.children.first + assert_equal 'Famous Poet', author.text.strip + end + + test 'single-line epigraph verse block with attribution converted to DocBook' do + input = <<~'EOS' + [verse.epigraph, Famous Poet, Famous Poem] + ____ + A famous verse. + ____ + EOS + output = convert_string input, backend: :docbook + assert_css 'epigraph', output, 1 + assert_css 'epigraph simpara', output, 0 + assert_css 'epigraph > literallayout', output, 1 + assert_css 'epigraph > attribution', output, 1 + assert_css 'epigraph > attribution > citetitle', output, 1 + assert_xpath '//epigraph/attribution/citetitle[text()="Famous Poem"]', output, 1 + attribution = xmlnodes_at_xpath '//epigraph/attribution', output, 1 + author = attribution.children.first + assert_equal 'Famous Poet', author.text.strip end test 'multi-stanza verse block' do - input = <<-EOS -[verse] -____ -A famous verse. - -Stanza two. -____ - EOS - output = render_string input - assert_xpath '//*[@class = "verseblock"]', output, 1 - assert_xpath '//*[@class = "verseblock"]/pre', output, 1 - assert_xpath '//*[@class = "verseblock"]//p', output, 0 - assert_xpath '//*[@class = "verseblock"]/pre[contains(text(), "A famous verse.")]', output, 1 - assert_xpath '//*[@class = "verseblock"]/pre[contains(text(), "Stanza two.")]', output, 1 + input = <<~'EOS' + [verse] + ____ + A famous verse. + + Stanza two. + ____ + EOS + output = convert_string input + assert_xpath '//*[@class="verseblock"]', output, 1 + assert_xpath '//*[@class="verseblock"]/pre', output, 1 + assert_xpath '//*[@class="verseblock"]//p', output, 0 + assert_xpath '//*[@class="verseblock"]/pre[contains(text(), "A famous verse.")]', output, 1 + assert_xpath '//*[@class="verseblock"]/pre[contains(text(), "Stanza two.")]', output, 1 end test 'verse block does not contain block elements' do - input = <<-EOS -[verse] -____ -A famous verse. - -.... -not a literal -.... -____ + input = <<~'EOS' + [verse] + ____ + A famous verse. + + .... + not a literal + .... 
+ ____ EOS - output = render_string input + output = convert_string input assert_css '.verseblock', output, 1 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock p', output, 0 @@ -467,1343 +694,1901 @@ end test 'verse should have normal subs' do - input = <<-EOS -[verse] -____ -A famous verse -____ + input = <<~'EOS' + [verse] + ____ + A famous verse + ____ EOS verse = block_from_string input - assert_equal Asciidoctor::Substitutors::SUBS[:normal], verse.subs + assert_equal Asciidoctor::Substitutors::NORMAL_SUBS, verse.subs end test 'should not recognize callouts in a verse' do - input = <<-EOS -[verse] -____ -La la la <1> -____ -<1> Not pointing to a callout + input = <<~'EOS' + [verse] + ____ + La la la <1> + ____ + <1> Not pointing to a callout EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//pre[text()="La la la <1>"]', output, 1 + assert_message @logger, :WARN, ': line 5: no callout found for <1>', Hash end test 'should perform normal subs on a verse block' do - input = <<-EOS -[verse] -____ -_GET /groups/link:#group-id[\{group-id\}]_ -____ + input = <<~'EOS' + [verse] + ____ + _GET /groups/link:#group-id[\{group-id\}]_ + ____ EOS - output = render_embedded_string input - assert output.include?('
<pre class="content"><em>GET /groups/<a href="#group-id">{group-id}</a></em></pre>')
+    output = convert_string_to_embedded input
+    assert_includes output, '<pre class="content"><em>GET /groups/<a href="#group-id">{group-id}</a></em></pre>
    ' end end context "Example Blocks" do - test "can render example block" do - input = <<-EOS -==== -This is an example of an example block. + test "can convert example block" do + input = <<~'EOS' + ==== + This is an example of an example block. -How crazy is that? -==== + How crazy is that? + ==== EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class="exampleblock"]//p', output, 2 end - test "assigns sequential numbered caption to example block with title" do - input = <<-EOS -.Writing Docs with AsciiDoc -==== -Here's how you write AsciiDoc. - -You just write. -==== - -.Writing Docs with DocBook -==== -Here's how you write DocBook. + test 'assigns sequential numbered caption to example block with title' do + input = <<~'EOS' + .Writing Docs with AsciiDoc + ==== + Here's how you write AsciiDoc. + + You just write. + ==== + + .Writing Docs with DocBook + ==== + Here's how you write DocBook. -You futz with XML. -==== + You futz with XML. + ==== EOS doc = document_from_string input - output = doc.render + assert_equal 1, doc.blocks[0].numeral + assert_equal 1, doc.blocks[0].number + assert_equal 2, doc.blocks[1].numeral + assert_equal 2, doc.blocks[1].number + output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example 1. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example 2. Writing Docs with DocBook"]', output, 1 assert_equal 2, doc.attributes['example-number'] end - test "assigns sequential character caption to example block with title" do - input = <<-EOS -:example-number: @ - -.Writing Docs with AsciiDoc -==== -Here's how you write AsciiDoc. - -You just write. -==== - -.Writing Docs with DocBook -==== -Here's how you write DocBook. + test 'assigns sequential character caption to example block with title' do + input = <<~'EOS' + :example-number: @ + + .Writing Docs with AsciiDoc + ==== + Here's how you write AsciiDoc. + + You just write. + ==== + + .Writing Docs with DocBook + ==== + Here's how you write DocBook. -You futz with XML. -==== + You futz with XML. + ==== EOS doc = document_from_string input - output = doc.render + assert_equal 'A', doc.blocks[0].numeral + assert_equal 'A', doc.blocks[0].number + assert_equal 'B', doc.blocks[1].numeral + assert_equal 'B', doc.blocks[1].number + output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example A. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example B. Writing Docs with DocBook"]', output, 1 assert_equal 'B', doc.attributes['example-number'] end test "explicit caption is used if provided" do - input = <<-EOS -[caption="Look! "] -.Writing Docs with AsciiDoc -==== -Here's how you write AsciiDoc. + input = <<~'EOS' + [caption="Look! "] + .Writing Docs with AsciiDoc + ==== + Here's how you write AsciiDoc. -You just write. -==== + You just write. + ==== EOS doc = document_from_string input - output = doc.render + assert_nil doc.blocks[0].numeral + output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Look! Writing Docs with AsciiDoc"]', output, 1 - assert !doc.attributes.has_key?('example-number') - end - - test 'explicit caption is set on block even if block has no title' do - input = <<-EOS -[caption="Look!"] -==== -Just write. 
-==== - EOS - - doc = document_from_string input - assert_equal 'Look!', doc.blocks.first.caption - output = doc.render - refute_match(/Look/, output) + refute doc.attributes.has_key?('example-number') end test 'automatic caption can be turned off and on and modified' do - input = <<-EOS -.first example -==== -an example -==== + input = <<~'EOS' + .first example + ==== + an example + ==== -:caption: + :caption: -.second example -==== -another example -==== + .second example + ==== + another example + ==== -:caption!: -:example-caption: Exhibit + :caption!: + :example-caption: Exhibit -.third example -==== -yet another example -==== + .third example + ==== + yet another example + ==== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="exampleblock"]', output, 3 assert_xpath '(/*[@class="exampleblock"])[1]/*[@class="title"][starts-with(text(), "Example ")]', output, 1 assert_xpath '(/*[@class="exampleblock"])[2]/*[@class="title"][text()="second example"]', output, 1 assert_xpath '(/*[@class="exampleblock"])[3]/*[@class="title"][starts-with(text(), "Exhibit ")]', output, 1 end + + test 'should create details/summary set if collapsible option is set' do + input = <<~'EOS' + .Toggle Me + [%collapsible] + ==== + This content is revealed when the user clicks the words "Toggle Me". + ==== + EOS + + output = convert_string_to_embedded input + assert_css 'details', output, 1 + assert_css 'details[open]', output, 0 + assert_css 'details > summary.title', output, 1 + assert_xpath '//details/summary[text()="Toggle Me"]', output, 1 + assert_css 'details > summary.title + .content', output, 1 + assert_css 'details > summary.title + .content p', output, 1 + end + + test 'should open details/summary set if collapsible and open options are set' do + input = <<~'EOS' + .Toggle Me + [%collapsible%open] + ==== + This content is revealed when the user clicks the words "Toggle Me". + ==== + EOS + + output = convert_string_to_embedded input + assert_css 'details', output, 1 + assert_css 'details[open]', output, 1 + assert_css 'details > summary.title', output, 1 + assert_xpath '//details/summary[text()="Toggle Me"]', output, 1 + end + + test 'should add default summary element if collapsible option is set and title is not specifed' do + input = <<~'EOS' + [%collapsible] + ==== + This content is revealed when the user clicks the words "Toggle Me". 
+ ==== + EOS + + output = convert_string_to_embedded input + assert_css 'details', output, 1 + assert_css 'details > summary.title', output, 1 + assert_xpath '//details/summary[text()="Details"]', output, 1 + end + + test 'should warn if example block is not terminated' do + input = <<~'EOS' + outside + + ==== + inside + + still inside + + eof + EOS + + output = convert_string_to_embedded input + assert_xpath '/*[@class="exampleblock"]', output, 1 + assert_message @logger, :WARN, ': line 3: unterminated example block', Hash + end end context 'Admonition Blocks' do test 'caption block-level attribute should be used as caption' do - input = <<-EOS -:tip-caption: Pro Tip + input = <<~'EOS' + :tip-caption: Pro Tip -[caption="Pro Tip"] -TIP: Override the caption of an admonition block using an attribute entry - EOS + [caption="Pro Tip"] + TIP: Override the caption of an admonition block using an attribute entry + EOS - output = render_embedded_string input - assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 end test 'can override caption of admonition block using document attribute' do - input = <<-EOS -:tip-caption: Pro Tip + input = <<~'EOS' + :tip-caption: Pro Tip -TIP: Override the caption of an admonition block using an attribute entry - EOS + TIP: Override the caption of an admonition block using an attribute entry + EOS - output = render_embedded_string input - assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 end test 'blank caption document attribute should not blank admonition block caption' do - input = <<-EOS -:caption: + input = <<~'EOS' + :caption: -TIP: Override the caption of an admonition block using an attribute entry - EOS + TIP: Override the caption of an admonition block using an attribute entry + EOS - output = render_embedded_string input - assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Tip"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Tip"]', output, 1 end end context "Preformatted Blocks" do test 'should separate adjacent paragraphs and listing into blocks' do - input = <<-EOS -paragraph 1 ----- -listing content ----- -paragraph 2 + input = <<~'EOS' + paragraph 1 + ---- + listing content + ---- + paragraph 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="paragraph"]/p', output, 2 assert_xpath '/*[@class="listingblock"]', output, 1 assert_xpath '(/*[@class="paragraph"]/following-sibling::*)[1][@class="listingblock"]', output, 1 end - test "should preserve endlines in literal block" do - input = <<-EOS -.... -line one - -line two - -line three -.... 
-EOS - [true, false].each {|header_footer| - output = render_string input, :header_footer => header_footer + test 'should warn if listing block is not terminated' do + input = <<~'EOS' + outside + + ---- + inside + + still inside + + eof + EOS + + output = convert_string_to_embedded input + assert_xpath '/*[@class="listingblock"]', output, 1 + assert_message @logger, :WARN, ': line 3: unterminated listing block', Hash + end + + test 'should not crash if listing block has no lines' do + input = <<~'EOS' + ---- + ---- + EOS + output = convert_string_to_embedded input + assert_css 'pre', output, 1 + assert_css 'pre:empty', output, 1 + end + + test 'should preserve newlines in literal block' do + input = <<~'EOS' + .... + line one + + line two + + line three + .... + EOS + [true, false].each do |standalone| + output = convert_string input, standalone: standalone assert_xpath '//pre', output, 1 assert_xpath '//pre/text()', output, 1 text = xmlnodes_at_xpath('//pre/text()', output, 1).text - lines = text.lines.entries + lines = text.lines assert_equal 5, lines.size - expected = "line one\n\nline two\n\nline three".lines.entries + expected = "line one\n\nline two\n\nline three".lines assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 - } + end end - test "should preserve endlines in listing block" do - input = <<-EOS -[source] ----- -line one - -line two - -line three ----- -EOS - [true, false].each {|header_footer| - output = render_string input, header_footer => header_footer - assert_xpath '//pre/code', output, 1 - assert_xpath '//pre/code/text()', output, 1 - text = xmlnodes_at_xpath('//pre/code/text()', output, 1).text - lines = text.lines.entries + test 'should preserve newlines in listing block' do + input = <<~'EOS' + ---- + line one + + line two + + line three + ---- + EOS + [true, false].each do |standalone| + output = convert_string input, standalone: standalone + assert_xpath '//pre', output, 1 + assert_xpath '//pre/text()', output, 1 + text = xmlnodes_at_xpath('//pre/text()', output, 1).text + lines = text.lines assert_equal 5, lines.size - expected = "line one\n\nline two\n\nline three".lines.entries + expected = "line one\n\nline two\n\nline three".lines assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 - } + end end - test "should preserve endlines in verse block" do - input = <<-EOS --- -[verse] -____ -line one - -line two - -line three -____ --- -EOS - [true, false].each {|header_footer| - output = render_string input, :header_footer => header_footer + test 'should preserve newlines in verse block' do + input = <<~'EOS' + -- + [verse] + ____ + line one + + line two + + line three + ____ + -- + EOS + [true, false].each do |standalone| + output = convert_string input, standalone: standalone assert_xpath '//*[@class="verseblock"]/pre', output, 1 assert_xpath '//*[@class="verseblock"]/pre/text()', output, 1 text = xmlnodes_at_xpath('//*[@class="verseblock"]/pre/text()', output, 1).text - lines = text.lines.entries + lines = text.lines assert_equal 5, lines.size - expected = "line one\n\nline two\n\nline three".lines.entries + expected = "line one\n\nline two\n\nline three".lines assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 - } + end end - test 'should strip leading and trailing blank lines when rendering verbatim block' do - input = <<-EOS -[subs="attributes"] -.... 
+ test 'should strip leading and trailing blank lines when converting verbatim block' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [subs="attributes"] + .... - first line + first line -last line + last line -{empty} + {empty} -.... + .... EOS - doc = document_from_string input, :header_footer => false + doc = document_from_string input, standalone: false block = doc.blocks.first assert_equal ['', '', ' first line', '', 'last line', '', '{empty}', ''], block.lines - result = doc.render + result = doc.convert assert_xpath %(//pre[text()=" first line\n\nlast line"]), result, 1 end - test 'should process block with CRLF endlines' do - input = <<-EOS -[source]\r -----\r -source line 1\r -source line 2\r -----\r + test 'should process block with CRLF line endings' do + input = <<~EOS + ----\r + source line 1\r + source line 2\r + ----\r EOS - output = render_embedded_string input - refute_match(/\[source\]/, output) + output = convert_string_to_embedded input assert_xpath '/*[@class="listingblock"]//pre', output, 1 - assert_xpath '/*[@class="listingblock"]//pre/code', output, 1 - assert_xpath %(/*[@class="listingblock"]//pre/code[text()="source line 1\nsource line 2"]), output, 1 + assert_xpath %(/*[@class="listingblock"]//pre[text()="source line 1\nsource line 2"]), output, 1 end test 'should remove block indent if indent attribute is 0' do - input = <<-EOS -[indent="0"] ----- - def names + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [indent="0"] + ---- + def names - @names.split ' ' + @names.split - end ----- + end + ---- EOS - expected = <<-EOS -def names + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + def names - @names.split ' ' + @names.split -end + end EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text - assert_equal expected.chomp, result + assert_equal expected, result end test 'should not remove block indent if indent attribute is -1' do - input = <<-EOS -[indent="-1"] ----- - def names + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [indent="-1"] + ---- + def names - @names.split ' ' + @names.split - end ----- + end + ---- EOS - expected = <<-EOS - def names - - @names.split ' ' + expected = (input.lines.slice 2, 5).join.chop - end - EOS - - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text - assert_equal expected.chomp, result + assert_equal expected, result end test 'should set block indent to value specified by indent attribute' do - input = <<-EOS -[indent="1"] ----- - def names + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [indent="1"] + ---- + def names - @names.split ' ' + @names.split - end ----- + end + ---- EOS - expected = <<-EOS - def names - - @names.split ' ' + expected = (input.lines.slice 2, 5).map {|l| l.sub ' ', ' ' }.join.chop - end - EOS - - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = 
xmlnodes_at_xpath('//pre', output, 1).text - assert_equal expected.chomp, result + assert_equal expected, result end test 'should set block indent to value specified by indent document attribute' do - input = <<-EOS -:source-indent: 1 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + :source-indent: 1 -[source,ruby] ----- - def names + [source,ruby] + ---- + def names - @names.split ' ' + @names.split - end ----- + end + ---- EOS - expected = <<-EOS - def names - - @names.split ' ' + expected = (input.lines.slice 4, 5).map {|l| l.sub ' ', ' '}.join.chop - end - EOS - - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text - assert_equal expected.chomp, result + assert_equal expected, result end test 'should expand tabs if tabsize attribute is positive' do - input = <<-EOS -:tabsize: 4 + input = <<~EOS + :tabsize: 4 -[indent=0] ----- - def names + [indent=0] + ---- + \tdef names - @names.split ' ' + \t\t@names.split - end ----- + \tend + ---- EOS - expected = <<-EOS -def names + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + def names - @names.split ' ' + @names.split -end + end EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text - assert_equal expected.chomp, result + assert_equal expected, result end test 'literal block should honor nowrap option' do - input = <<-EOS -[options="nowrap"] ----- -Do not wrap me if I get too long. ----- + input = <<~'EOS' + [options="nowrap"] + ---- + Do not wrap me if I get too long. + ---- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre.nowrap', output, 1 end test 'literal block should set nowrap class if prewrap document attribute is disabled' do - input = <<-EOS -:prewrap!: + input = <<~'EOS' + :prewrap!: ----- -Do not wrap me if I get too long. ----- + ---- + Do not wrap me if I get too long. 
+ ---- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'pre.nowrap', output, 1 end test 'literal block should honor explicit subs list' do - input = <<-EOS -[subs="verbatim,quotes"] ----- -Map *attributes*; //<1> ----- + input = <<~'EOS' + [subs="verbatim,quotes"] + ---- + Map *attributes*; //<1> + ---- EOS block = block_from_string input assert_equal [:specialcharacters,:callouts,:quotes], block.subs - output = block.render - assert output.include?('Map<String, String> attributes;') + output = block.convert + assert_includes output, 'Map<String, String> attributes;' assert_xpath '//pre/b[text()="(1)"]', output, 1 end test 'should be able to disable callouts for literal block' do - input = <<-EOS -[subs="specialcharacters"] ----- -No callout here <1> ----- + input = <<~'EOS' + [subs="specialcharacters"] + ---- + No callout here <1> + ---- EOS block = block_from_string input assert_equal [:specialcharacters], block.subs - output = block.render + output = block.convert assert_xpath '//pre/b[text()="(1)"]', output, 0 end test 'listing block should honor explicit subs list' do - input = <<-EOS -[subs="specialcharacters,quotes"] ----- -$ *python functional_tests.py* -Traceback (most recent call last): - File "functional_tests.py", line 4, in - assert 'Django' in browser.title -AssertionError ----- + input = <<~'EOS' + [subs="specialcharacters,quotes"] + ---- + $ *python functional_tests.py* + Traceback (most recent call last): + File "functional_tests.py", line 4, in + assert 'Django' in browser.title + AssertionError + ---- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.listingblock pre', output, 1 assert_css '.listingblock pre strong', output, 1 assert_css '.listingblock pre em', output, 0 - input2 = <<-EOS -[subs="specialcharacters,macros"] ----- -$ pass:quotes[*python functional_tests.py*] -Traceback (most recent call last): - File "functional_tests.py", line 4, in - assert pass:quotes['Django'] in browser.title -AssertionError ----- + input2 = <<~'EOS' + [subs="specialcharacters,macros"] + ---- + $ pass:quotes[*python functional_tests.py*] + Traceback (most recent call last): + File "functional_tests.py", line 4, in + assert pass:quotes['Django'] in browser.title + AssertionError + ---- EOS - output2 = render_embedded_string input2 - # FIXME JRuby is adding extra trailing endlines in the second document, + output2 = convert_string_to_embedded input2 + # FIXME JRuby is adding extra trailing newlines in the second document, # for now, rstrip is necessary assert_equal output.rstrip, output2.rstrip end + test 'first character of block title may be a period if not followed by space' do + input = <<~'EOS' + ..gitignore + ---- + /.bundle/ + /build/ + /Gemfile.lock + ---- + EOS + + output = convert_string_to_embedded input + assert_xpath '//*[@class="title"][text()=".gitignore"]', output + end + test 'listing block without title should generate screen element in docbook' do - input = <<-EOS ----- -listing block ----- + input = <<~'EOS' + ---- + listing block + ---- EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/screen[text()="listing block"]', output, 1 end test 'listing block with title should generate screen element inside formalpara element in docbook' do - input = <<-EOS -.title ----- -listing block ----- + input = <<~'EOS' + .title + ---- + listing block + ---- EOS - output = 
render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/formalpara', output, 1 assert_xpath '/formalpara/title[text()="title"]', output, 1 assert_xpath '/formalpara/para/screen[text()="listing block"]', output, 1 end + test 'listing block without an explicit style and with a second positional argument should be promoted to a source block' do + input = <<~'EOS' + [,ruby] + ---- + puts 'Hello, Ruby!' + ---- + EOS + matches = (document_from_string input).find_by context: :listing, style: 'source' + assert_equal 1, matches.length + assert_equal 'ruby', (matches[0].attr 'language') + end + + test 'listing block without an explicit style should be promoted to a source block if source-language is set' do + input = <<~'EOS' + :source-language: ruby + + ---- + puts 'Hello, Ruby!' + ---- + EOS + matches = (document_from_string input).find_by context: :listing, style: 'source' + assert_equal 1, matches.length + assert_equal 'ruby', (matches[0].attr 'language') + end + + test 'listing block with an explicit style and a second positional argument should not be promoted to a source block' do + input = <<~'EOS' + [listing,ruby] + ---- + puts 'Hello, Ruby!' + ---- + EOS + matches = (document_from_string input).find_by context: :listing + assert_equal 1, matches.length + assert_equal 'listing', matches[0].style + assert_nil (matches[0].attr 'language') + end + + test 'listing block with an explicit style should not be promoted to a source block if source-language is set' do + input = <<~'EOS' + :source-language: ruby + + [listing] + ---- + puts 'Hello, Ruby!' + ---- + EOS + matches = (document_from_string input).find_by context: :listing + assert_equal 1, matches.length + assert_equal 'listing', matches[0].style + assert_nil (matches[0].attr 'language') + end + test 'source block with no title or language should generate screen element in docbook' do - input = <<-EOS -[source] ----- -listing block ----- + input = <<~'EOS' + [source] + ---- + source block + ---- EOS - output = render_embedded_string input, :backend => 'docbook' - assert_xpath '/screen[text()="listing block"]', output, 1 + output = convert_string_to_embedded input, backend: 'docbook' + assert_xpath '/screen[@linenumbering="unnumbered"][text()="source block"]', output, 1 end - test 'source block with title and no language should generate screen element inside formalpara element in docbook' do - input = <<-EOS -[source] -.title ----- -listing block ----- + test 'source block with title and no language should generate screen element inside formalpara element for docbook' do + input = <<~'EOS' + [source] + .title + ---- + source block + ---- EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/formalpara', output, 1 assert_xpath '/formalpara/title[text()="title"]', output, 1 - assert_xpath '/formalpara/para/screen[text()="listing block"]', output, 1 + assert_xpath '/formalpara/para/screen[@linenumbering="unnumbered"][text()="source block"]', output, 1 end end context "Open Blocks" do - test "can render open block" do - input = <<-EOS --- -This is an open block. + test "can convert open block" do + input = <<~'EOS' + -- + This is an open block. -It can span multiple lines. --- + It can span multiple lines. 
+ -- EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class="openblock"]//p', output, 2 end test "open block can contain another block" do - input = <<-EOS --- -This is an open block. + input = <<~'EOS' + -- + This is an open block. -It can span multiple lines. + It can span multiple lines. -____ -It can hold great quotes like this one. -____ --- + ____ + It can hold great quotes like this one. + ____ + -- EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class="openblock"]//p', output, 3 assert_xpath '//*[@class="openblock"]//*[@class="quoteblock"]', output, 1 end + + test 'should transfer id and reftext on open block to DocBook output' do + input = <<~'EOS' + Check out that <>! + + [[open,Open Block]] + -- + This is an open block. + + TIP: An open block can have other blocks inside of it. + -- + + Back to our regularly scheduled programming. + EOS + + output = convert_string input, backend: :docbook, keep_namespaces: true + assert_css 'article:root > para[xml|id="open"]', output, 1 + assert_css 'article:root > para[xreflabel="Open Block"]', output, 1 + assert_css 'article:root > simpara', output, 2 + assert_css 'article:root > para', output, 1 + assert_css 'article:root > para > simpara', output, 1 + assert_css 'article:root > para > tip', output, 1 + end + + test 'should transfer id and reftext on open paragraph to DocBook output' do + input = <<~'EOS' + [open#openpara,reftext="Open Paragraph"] + This is an open paragraph. + EOS + + output = convert_string input, backend: :docbook, keep_namespaces: true + assert_css 'article:root > simpara', output, 1 + assert_css 'article:root > simpara[xml|id="openpara"]', output, 1 + assert_css 'article:root > simpara[xreflabel="Open Paragraph"]', output, 1 + end + + test 'should transfer title on open block to DocBook output' do + input = <<~'EOS' + .Behold the open + -- + This is an open block with a title. + -- + EOS + + output = convert_string input, backend: :docbook + assert_css 'article > formalpara', output, 1 + assert_css 'article > formalpara > *', output, 2 + assert_css 'article > formalpara > title', output, 1 + assert_xpath '/article/formalpara/title[text()="Behold the open"]', output, 1 + assert_css 'article > formalpara > para', output, 1 + assert_css 'article > formalpara > para > simpara', output, 1 + end + + test 'should transfer title on open paragraph to DocBook output' do + input = <<~'EOS' + .Behold the open + This is an open paragraph with a title. + EOS + + output = convert_string input, backend: :docbook + assert_css 'article > formalpara', output, 1 + assert_css 'article > formalpara > *', output, 2 + assert_css 'article > formalpara > title', output, 1 + assert_xpath '/article/formalpara/title[text()="Behold the open"]', output, 1 + assert_css 'article > formalpara > para', output, 1 + assert_css 'article > formalpara > para[text()="This is an open paragraph with a title."]', output, 1 + end + + test 'should transfer role on open block to DocBook output' do + input = <<~'EOS' + [.container] + -- + This is an open block. + It holds stuff. + -- + EOS + + output = convert_string input, backend: :docbook + assert_css 'article > para[role=container]', output, 1 + assert_css 'article > para[role=container] > simpara', output, 1 + end + + test 'should transfer role on open paragraph to DocBook output' do + input = <<~'EOS' + [.container] + This is an open block. + It holds stuff. 
+ EOS + + output = convert_string input, backend: :docbook + assert_css 'article > simpara[role=container]', output, 1 + end end context 'Passthrough Blocks' do test 'can parse a passthrough block' do - input = <<-EOS -++++ -This is a passthrough block. -++++ + input = <<~'EOS' + ++++ + This is a passthrough block. + ++++ EOS block = block_from_string input - assert !block.nil? + refute_nil block assert_equal 1, block.lines.size assert_equal 'This is a passthrough block.', block.source end test 'does not perform subs on a passthrough block by default' do - input = <<-EOS -:type: passthrough + input = <<~'EOS' + :type: passthrough -++++ -This is a '{type}' block. -http://asciidoc.org -image:tiger.png[] -++++ + ++++ + This is a '{type}' block. + http://asciidoc.org + image:tiger.png[] + ++++ EOS expected = %(This is a '{type}' block.\nhttp://asciidoc.org\nimage:tiger.png[]) - output = render_embedded_string input + output = convert_string_to_embedded input assert_equal expected, output.strip end test 'does not perform subs on a passthrough block with pass style by default' do - input = <<-EOS -:type: passthrough + input = <<~'EOS' + :type: passthrough -[pass] -++++ -This is a '{type}' block. -http://asciidoc.org -image:tiger.png[] -++++ + [pass] + ++++ + This is a '{type}' block. + http://asciidoc.org + image:tiger.png[] + ++++ EOS expected = %(This is a '{type}' block.\nhttp://asciidoc.org\nimage:tiger.png[]) - output = render_embedded_string input + output = convert_string_to_embedded input assert_equal expected, output.strip end test 'passthrough block honors explicit subs list' do - input = <<-EOS -:type: passthrough + input = <<~'EOS' + :type: passthrough -[subs="attributes,quotes,macros"] -++++ -This is a _{type}_ block. -http://asciidoc.org -++++ + [subs="attributes,quotes,macros"] + ++++ + This is a _{type}_ block. 
+ http://asciidoc.org + ++++ EOS expected = %(This is a passthrough block.\nhttp://asciidoc.org) - output = render_embedded_string input + output = convert_string_to_embedded input assert_equal expected, output.strip end - test 'should strip leading and trailing blank lines when rendering raw block' do - input = <<-EOS -++++ -line above -++++ + test 'should strip leading and trailing blank lines when converting raw block' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + ++++ + line above + ++++ -++++ + ++++ - first line + first line -last line + last line -++++ + ++++ -++++ -line below -++++ + ++++ + line below + ++++ EOS - doc = document_from_string input, :header_footer => false + doc = document_from_string input, standalone: false block = doc.blocks[1] assert_equal ['', '', ' first line', '', 'last line', '', ''], block.lines - result = doc.render + result = doc.convert assert_equal "line above\n first line\n\nlast line\nline below", result, 1 end end context 'Math blocks' do + test 'should not crash when converting to HTML if stem block is empty' do + input = <<~'EOS' + [stem] + ++++ + ++++ + EOS + + output = convert_string_to_embedded input + assert_css '.stemblock', output, 1 + end + test 'should add LaTeX math delimiters around latexmath block content' do - input = <<-'EOS' -[latexmath] -++++ -\sqrt{3x-1}+(1+x)^2 < y -++++ + input = <<~'EOS' + [latexmath] + ++++ + \sqrt{3x-1}+(1+x)^2 < y + ++++ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.stemblock', output, 1 - nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end test 'should not add LaTeX math delimiters around latexmath block content if already present' do - input = <<-'EOS' -[latexmath] -++++ -\[\sqrt{3x-1}+(1+x)^2 < y\] -++++ + input = <<~'EOS' + [latexmath] + ++++ + \[\sqrt{3x-1}+(1+x)^2 < y\] + ++++ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.stemblock', output, 1 - nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end - test 'should render latexmath block in alt of equation in DocBook backend' do - input = <<-'EOS' -[latexmath] -++++ -\sqrt{3x-1}+(1+x)^2 < y -++++ - EOS - - expect = <<-'EOS' - - - - + test 'should display latexmath block in alt of equation in DocBook backend' do + input = <<~'EOS' + [latexmath] + ++++ + \sqrt{3x-1}+(1+x)^2 < y + ++++ + EOS + + expect = <<~'EOS' + + + + EOS - output = render_embedded_string input, :backend => :docbook + output = convert_string_to_embedded input, backend: :docbook assert_equal expect.strip, output.strip end - test 'should add AsciiMath delimiters around asciimath block content' do - input = <<-'EOS' -[asciimath] -++++ -sqrt(3x-1)+(1+x)^2 < y -++++ + test 'should not split equation in AsciiMath block at single newline' do + input = <<~'EOS' + [asciimath] + ++++ + f: bbb"N" -> bbb"N" + f: x |-> x + 1 + ++++ + EOS + expected = <<~'EOS'.chop + \$f: bbb"N" -> bbb"N" + f: x |-> x + 1\$ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.stemblock', output, 1 - nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', 
output, 1 - assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip + nodes = xmlnodes_at_xpath '//*[@class="content"]', output + assert_equal expected, nodes.first.inner_html.strip end - test 'should not add AsciiMath delimiters around asciimath block content if already present' do - input = <<-'EOS' -[asciimath] -++++ -\$sqrt(3x-1)+(1+x)^2 < y\$ -++++ + test 'should split equation in AsciiMath block at escaped newline' do + input = <<~'EOS' + [asciimath] + ++++ + f: bbb"N" -> bbb"N" \ + f: x |-> x + 1 + ++++ + EOS + expected = <<~'EOS'.chop + \$f: bbb"N" -> bbb"N"\$<br>
    + \$f: x |-> x + 1\$ + EOS + + output = convert_string_to_embedded input + assert_css '.stemblock', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]', output + assert_equal expected, nodes.first.inner_html.strip + end + + test 'should split equation in AsciiMath block at sequence of escaped newlines' do + input = <<~'EOS' + [asciimath] + ++++ + f: bbb"N" -> bbb"N" \ + \ + f: x |-> x + 1 + ++++ + EOS + expected = <<~'EOS'.chop + \$f: bbb"N" -> bbb"N"\$<br>
    + <br>
    + \$f: x |-> x + 1\$ + EOS + + output = convert_string_to_embedded input + assert_css '.stemblock', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]', output + assert_equal expected, nodes.first.inner_html.strip + end + + test 'should split equation in AsciiMath block at newline sequence and preserve breaks' do + input = <<~'EOS' + [asciimath] + ++++ + f: bbb"N" -> bbb"N" + + + f: x |-> x + 1 + ++++ + EOS + expected = <<~'EOS'.chop + \$f: bbb"N" -> bbb"N"\$<br>
    + <br>
    + <br>
    + \$f: x |-> x + 1\$ + EOS + + output = convert_string_to_embedded input + assert_css '.stemblock', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]', output + assert_equal expected, nodes.first.inner_html.strip + end + + test 'should add AsciiMath delimiters around asciimath block content' do + input = <<~'EOS' + [asciimath] + ++++ + sqrt(3x-1)+(1+x)^2 < y + ++++ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.stemblock', output, 1 - nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end - test 'should render asciimath block in textobject of equation in DocBook backend' do - input = <<-'EOS' -[asciimath] -++++ -x+b/(2a)<+-sqrt((b^2)/(4a^2)-c/a) -++++ + test 'should not add AsciiMath delimiters around asciimath block content if already present' do + input = <<~'EOS' + [asciimath] + ++++ + \$sqrt(3x-1)+(1+x)^2 < y\$ + ++++ EOS - expect = %( -x+b2a<±b24a2ca -) + output = convert_string_to_embedded input + assert_css '.stemblock', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output + assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip + end - output = render_embedded_string input, :backend => :docbook - assert_equal expect.strip, output.strip + test 'should convert contents of asciimath block to MathML in DocBook output if asciimath gem is available' do + asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? + input = <<~'EOS' + [asciimath] + ++++ + x+b/(2a)<+-sqrt((b^2)/(4a^2)-c/a) + ++++ + + [asciimath] + ++++ + ++++ + EOS + + expect = <<~'EOS'.chop + + x+b2a<±b24a2ca + + + + + EOS + + using_memory_logger do |logger| + doc = document_from_string input, backend: :docbook, standalone: false + actual = doc.convert + if asciimath_available + assert_equal expect, actual.strip + assert_equal :loaded, doc.converter.instance_variable_get(:@asciimath_status) + else + assert_message logger, :WARN, 'optional gem \'asciimath\' is not available. Functionality disabled.' 
+ assert_equal :unavailable, doc.converter.instance_variable_get(:@asciimath_status) + end + end end test 'should output title for latexmath block if defined' do - input = <<-'EOS' -.The Lorenz Equations -[latexmath] -++++ -\begin{aligned} -\dot{x} & = \sigma(y-x) \\ -\dot{y} & = \rho x - y - xz \\ -\dot{z} & = -\beta z + xy -\end{aligned} -++++ + input = <<~'EOS' + .The Lorenz Equations + [latexmath] + ++++ + \begin{aligned} + \dot{x} & = \sigma(y-x) \\ + \dot{y} & = \rho x - y - xz \\ + \dot{z} & = -\beta z + xy + \end{aligned} + ++++ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.stemblock', output, 1 assert_css '.stemblock .title', output, 1 assert_xpath '//*[@class="title"][text()="The Lorenz Equations"]', output, 1 end test 'should output title for asciimath block if defined' do - input = <<-'EOS' -.Simple fraction -[asciimath] -++++ -a//b -++++ + input = <<~'EOS' + .Simple fraction + [asciimath] + ++++ + a//b + ++++ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.stemblock', output, 1 assert_css '.stemblock .title', output, 1 assert_xpath '//*[@class="title"][text()="Simple fraction"]', output, 1 end - test 'should add AsciiMath delimiters around stem block content if stem attribute != latexmath' do - input = <<-'EOS' -[stem] -++++ -sqrt(3x-1)+(1+x)^2 < y -++++ + test 'should add AsciiMath delimiters around stem block content if stem attribute is asciimath, empty, or not set' do + input = <<~'EOS' + [stem] + ++++ + sqrt(3x-1)+(1+x)^2 < y + ++++ EOS [ {}, - {'stem' => ''}, - {'stem' => 'asciimath'} + { 'stem' => '' }, + { 'stem' => 'asciimath' }, + { 'stem' => 'bogus' }, ].each do |attributes| - output = render_embedded_string input, :attributes => attributes + output = convert_string_to_embedded input, attributes: attributes assert_css '.stemblock', output, 1 - nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end end - test 'should add LaTeX math delimiters around stem block content if stem attribute is latexmath' do - input = <<-'EOS' -[stem] -++++ -\sqrt{3x-1}+(1+x)^2 < y -++++ + test 'should add LaTeX math delimiters around stem block content if stem attribute is latexmath, latex, or tex' do + input = <<~'EOS' + [stem] + ++++ + \sqrt{3x-1}+(1+x)^2 < y + ++++ EOS - output = render_embedded_string input, :attributes => {'stem' => 'latexmath'} + [ + { 'stem' => 'latexmath' }, + { 'stem' => 'latex' }, + { 'stem' => 'tex' }, + ].each do |attributes| + output = convert_string_to_embedded input, attributes: attributes + assert_css '.stemblock', output, 1 + nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output + assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip + end + end + + test 'should allow stem style to be set using second positional argument of block attributes' do + input = <<~'EOS' + :stem: latexmath + + [stem,asciimath] + ++++ + sqrt(3x-1)+(1+x)^2 < y + ++++ + EOS + + doc = document_from_string input + stemblock = doc.blocks[0] + assert_equal :stem, stemblock.context + assert_equal 'asciimath', stemblock.attributes['style'] + output = doc.convert standalone: false assert_css '.stemblock', output, 1 - nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 - assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip + nodes = xmlnodes_at_xpath 
'//*[@class="content"]/child::text()', output + assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip + end + end + + context 'Custom Blocks' do + test 'should not warn if block style is unknown' do + input = <<~'EOS' + [foo] + -- + bar + -- + EOS + convert_string_to_embedded input + assert_empty @logger.messages + end + + test 'should log debug message if block style is unknown and debug level is enabled' do + input = <<~'EOS' + [foo] + -- + bar + -- + EOS + using_memory_logger Logger::Severity::DEBUG do |logger| + convert_string_to_embedded input + assert_message logger, :DEBUG, ': line 2: unknown style for open block: foo', Hash + end end end context 'Metadata' do test 'block title above section gets carried over to first block in section' do - input = <<-EOS -.Title -== Section + input = <<~'EOS' + .Title + == Section -paragraph + paragraph EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class="paragraph"]', output, 1 - assert_xpath '//*[@class="paragraph"]/*[@class="title"][text() = "Title"]', output, 1 - assert_xpath '//*[@class="paragraph"]/p[text() = "paragraph"]', output, 1 + assert_xpath '//*[@class="paragraph"]/*[@class="title"][text()="Title"]', output, 1 + assert_xpath '//*[@class="paragraph"]/p[text()="paragraph"]', output, 1 end test 'block title above document title demotes document title to a section title' do - input = <<-EOS -.Block title -= Section Title - -section paragraph - EOS - output, errors = nil - redirect_streams do |stdout, stderr| - output = render_string input - errors = stderr.string - end + input = <<~'EOS' + .Block title + = Section Title + + section paragraph + EOS + output = convert_string input assert_xpath '//*[@id="header"]/*', output, 0 assert_xpath '//*[@id="preamble"]/*', output, 0 assert_xpath '//*[@id="content"]/h1[text()="Section Title"]', output, 1 assert_xpath '//*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="paragraph"]/*[@class="title"][text()="Block title"]', output, 1 - assert !errors.empty? - assert_match(/only book doctypes can contain level 0 sections/, errors) + assert_message @logger, :ERROR, ': line 2: level 0 sections can only be used when doctype is book', Hash end test 'block title above document title gets carried over to first block in first section if no preamble' do - input = <<-EOS -.Block title -= Document Title + input = <<~'EOS' + :doctype: book + .Block title + = Document Title -== First Section + == First Section -paragraph + paragraph EOS - output = render_string input - assert_xpath '//*[@class="sect1"]//*[@class="paragraph"]/*[@class="title"][text() = "Block title"]', output, 1 + doc = document_from_string input + # NOTE block title demotes document title to level-0 section + refute doc.header? + output = doc.convert + assert_xpath '//*[@class="sect1"]//*[@class="paragraph"]/*[@class="title"][text()="Block title"]', output, 1 + end + + test 'should apply substitutions to a block title in normal order' do + input = <<~'EOS' + .{link-url}[{link-text}]{tm} + The one and only! 
+ EOS + + output = convert_string_to_embedded input, attributes: { + 'link-url' => 'https://acme.com', + 'link-text' => 'ACME', + 'tm' => '(TM)', + } + assert_css '.title', output, 1 + assert_css '.title a[href="https://acme.com"]', output, 1 + assert_xpath %(//*[@class="title"][contains(text(),"#{decode_char 8482}")]), output, 1 end test 'empty attribute list should not appear in output' do - input = <<-EOS -[] --- -Block content --- + input = <<~'EOS' + [] + -- + Block content + -- EOS - output = render_embedded_string input - assert output.include?('Block content') - assert !output.include?('[]') + output = convert_string_to_embedded input + assert_includes output, 'Block content' + refute_includes output, '[]' end test 'empty block anchor should not appear in output' do - input = <<-EOS -[[]] --- -Block content --- + input = <<~'EOS' + [[]] + -- + Block content + -- EOS - output = render_embedded_string input - assert output.include?('Block content') - assert !output.include?('[[]]') + output = convert_string_to_embedded input + assert_includes output, 'Block content' + refute_includes output, '[[]]' end end context 'Images' do - test 'can render block image with alt text defined in macro' do - input = <<-EOS -image::images/tiger.png[Tiger] - EOS - - output = render_embedded_string input + test 'can convert block image with alt text defined in macro' do + input = 'image::images/tiger.png[Tiger]' + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end - test 'renders SVG image using img element by default' do - input = <<-EOS -image::tiger.svg[Tiger] - EOS - - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER + test 'converts SVG image using img element by default' do + input = 'image::tiger.svg[Tiger]' + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//img[@src="tiger.svg"][@alt="Tiger"]', output, 1 end - test 'renders interactive SVG image with alt text using object element' do - input = <<-EOS -:imagesdir: images + test 'converts interactive SVG image with alt text using object element' do + input = <<~'EOS' + :imagesdir: images -[%interactive] -image::tiger.svg[Tiger,100] + [%interactive] + image::tiger.svg[Tiger,100] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="images/tiger.svg"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end - test 'renders SVG image with alt text using img element when safe mode is secure' do - input = <<-EOS -[%interactive] -image::images/tiger.svg[Tiger,100] + test 'converts SVG image with alt text using img element when safe mode is secure' do + input = <<~'EOS' + [%interactive] + image::images/tiger.svg[Tiger,100] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.svg"][@alt="Tiger"]', output, 1 end test 'inserts fallback image for SVG inside object element using same dimensions' do - input = <<-EOS -:imagesdir: images + input = <<~'EOS' + :imagesdir: images -[%interactive] -image::tiger.svg[Tiger,100,fallback=tiger.png] + [%interactive] + image::tiger.svg[Tiger,100,fallback=tiger.png] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER + 
output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="images/tiger.svg"][@width="100"]/img[@src="images/tiger.png"][@width="100"]', output, 1 end test 'detects SVG image URI that contains a query string' do - input = <<-EOS -:imagesdir: images + input = <<~'EOS' + :imagesdir: images -[%interactive] -image::http://example.org/tiger.svg?foo=bar[Tiger,100] + [%interactive] + image::http://example.org/tiger.svg?foo=bar[Tiger,100] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="http://example.org/tiger.svg?foo=bar"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'detects SVG image when format attribute is svg' do - input = <<-EOS -:imagesdir: images + input = <<~'EOS' + :imagesdir: images -[%interactive] -image::http://example.org/tiger-svg[Tiger,100,format=svg] + [%interactive] + image::http://example.org/tiger-svg[Tiger,100,format=svg] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="http://example.org/tiger-svg"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end - test 'renders inline SVG image using svg element' do - input = <<-EOS -:imagesdir: fixtures + test 'converts inline SVG image using svg element' do + input = <<~'EOS' + :imagesdir: fixtures -[%inline] -image::circle.svg[Tiger,100] + [%inline] + image::circle.svg[Tiger,100] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'docdir' => ::File.dirname(__FILE__) } + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } assert_match(/]*width="100px"[^>]*>/, output, 1) refute_match(/]*width="500px"[^>]*>/, output) refute_match(/]*height="500px"[^>]*>/, output) refute_match(/]*style="width:500px;height:500px"[^>]*>/, output) end - test 'renders inline SVG image using svg element even when data-uri is set' do - input = <<-EOS -:imagesdir: fixtures -:data-uri: + test 'converts inline SVG image using svg element even when data-uri is set' do + input = <<~'EOS' + :imagesdir: fixtures + :data-uri: -[%inline] -image::circle.svg[Tiger,100] + [%inline] + image::circle.svg[Tiger,100] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'docdir' => ::File.dirname(__FILE__) } + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } assert_match(/]*width="100px">/, output, 1) end - test 'renders alt text for inline svg element if svg cannot be read' do - input = <<-EOS -[%inline] -image::no-such-image.svg[Alt Text] + test 'embeds remote inline SVG when allow-uri-read is set' do + input = %(image::http://#{resolve_localhost}:9876/fixtures/circle.svg[Circle,100,100,opts=inline]) + output = using_test_webserver do + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } + end + + assert_css 'svg', output, 1 + assert_css 'svg[style]', output, 0 + assert_css 'svg[width="100px"]', output, 1 + assert_css 'svg[height="100px"]', output, 1 + assert_css 'svg circle', output, 1 + end + + test 
'converts alt text for inline svg element if svg cannot be read' do + input = <<~'EOS' + [%inline] + image::no-such-image.svg[Alt Text] EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//span[@class="alt"][text()="Alt Text"]', output, 1 + assert_message @logger, :WARN, '~SVG does not exist or cannot be read' end - test 'can render block image with alt text defined in macro containing escaped square bracket' do - input = <<-EOS -image::images/tiger.png[A [Bengal\\] Tiger] - EOS + test 'can convert block image with alt text defined in macro containing square bracket' do + input = 'image::images/tiger.png[A [Bengal] Tiger]' + output = convert_string input + img = xmlnodes_at_xpath '//img', output, 1 + assert_equal 'A [Bengal] Tiger', img.attr('alt') + end - output = render_string input + test 'can convert block image with target containing spaces' do + input = 'image::images/big tiger.png[A Big Tiger]' + output = convert_string input img = xmlnodes_at_xpath '//img', output, 1 - assert_equal 'A [Bengal] Tiger', img.attr('alt').value + assert_equal 'images/big%20tiger.png', img.attr('src') + assert_equal 'A Big Tiger', img.attr('alt') end - test 'can render block image with alt text defined in block attribute above macro' do - input = <<-EOS -[Tiger] -image::images/tiger.png[] + test 'should not recognize block image if target has leading or trailing spaces' do + [' tiger.png', 'tiger.png '].each do |target| + input = %(image::#{target}[Tiger]) + + output = convert_string_to_embedded input + assert_xpath '//img', output, 0 + end + end + + test 'can convert block image with alt text defined in block attribute above macro' do + input = <<~'EOS' + [Tiger] + image::images/tiger.png[] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'alt text in macro overrides alt text above macro' do - input = <<-EOS -[Alt Text] -image::images/tiger.png[Tiger] + input = <<~'EOS' + [Alt Text] + image::images/tiger.png[Tiger] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end - test 'alt text is escaped in HTML backend' do - input = <<-EOS -image::images/open.png[File > Open] - EOS + test 'should substitute attribute references in alt text defined in image block macro' do + input = <<~'EOS' + :alt-text: Tiger - output = render_embedded_string input - assert_match(/File > Open/, output) + image::images/tiger.png[{alt-text}] + EOS + output = convert_string_to_embedded input + assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end - test 'alt text is escaped in DocBook backend' do - input = <<-EOS -image::images/open.png[File > Open] + test 'should set direction CSS class on image if float attribute is set' do + input = <<~'EOS' + [float=left] + image::images/tiger.png[Tiger] EOS - output = render_embedded_string input, :backend => :docbook - assert_match(/File > Open/, output) + output = convert_string_to_embedded input + assert_css '.imageblock.left', output, 1 + assert_css '.imageblock[style]', output, 0 end - test "can render block image with auto-generated alt text" do - input = <<-EOS -image::images/tiger.png[] + test 'should set text alignment CSS class on image if align 
attribute is set' do + input = <<~'EOS' + [align=center] + image::images/tiger.png[Tiger] EOS - output = render_embedded_string input - assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="tiger"]', output, 1 + output = convert_string_to_embedded input + assert_css '.imageblock.text-center', output, 1 + assert_css '.imageblock[style]', output, 0 end - test "can render block image with alt text and height and width" do - input = <<-EOS -image::images/tiger.png[Tiger, 200, 300] + test 'style attribute is dropped from image macro' do + input = <<~'EOS' + [style=value] + image::images/tiger.png[Tiger] EOS - output = render_embedded_string input + doc = document_from_string input + img = doc.blocks[0] + refute(img.attributes.key? 'style') + assert_nil img.style + end + + test 'should apply specialcharacters and replacement substitutions to alt text' do + input = 'A tiger\'s "roar" is < a bear\'s "growl"' + expected = 'A tiger’s "roar" is < a bear’s "growl"' + result = convert_string_to_embedded %(image::images/tiger-roar.png[#{input}]) + assert_includes result, %(alt="#{expected}") + end + + test 'should not encode double quotes in alt text when converting to DocBook' do + input = 'Select "File > Open"' + expected = 'Select "File > Open"' + result = convert_string_to_embedded %(image::images/open.png[#{input}]), backend: :docbook + assert_includes result, %(#{expected}) + end + + test 'should auto-generate alt text for block image if alt text is not specified' do + input = 'image::images/lions-and-tigers.png[]' + image = block_from_string input + assert_equal 'lions and tigers', (image.attr 'alt') + assert_equal 'lions and tigers', (image.attr 'default-alt') + output = image.convert + assert_xpath '/*[@class="imageblock"]//img[@src="images/lions-and-tigers.png"][@alt="lions and tigers"]', output, 1 + end + + test "can convert block image with alt text and height and width" do + input = 'image::images/tiger.png[Tiger, 200, 300]' + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"][@width="200"][@height="300"]', output, 1 end - test "can render block image with link" do - input = <<-EOS -image::images/tiger.png[Tiger, link='http://en.wikipedia.org/wiki/Tiger'] + test "can convert block image with link" do + input = <<~'EOS' + image::images/tiger.png[Tiger, link='http://en.wikipedia.org/wiki/Tiger'] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end - test "can render block image with caption" do - input = <<-EOS -.The AsciiDoc Tiger -image::images/tiger.png[Tiger] + test 'adds rel=noopener attribute to block image with link that targets _blank window' do + input = 'image::images/tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=_blank]' + output = convert_string_to_embedded input + assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"][@target="_blank"][@rel="noopener"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 + end + + test 'adds rel=noopener attribute to block image with link that targets name window when the noopener option is set' do + input = 'image::images/tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=name,opts=noopener]' + output = convert_string_to_embedded input + assert_xpath 
'/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"][@target="name"][@rel="noopener"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 + end + + test 'adds rel=nofollow attribute to block image with a link when the nofollow option is set' do + input = 'image::images/tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,opts=nofollow]' + output = convert_string_to_embedded input + assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"][@rel="nofollow"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 + end + + test 'can convert block image with caption' do + input = <<~'EOS' + .The AsciiDoc Tiger + image::images/tiger.png[Tiger] EOS doc = document_from_string input - output = doc.render + assert_equal 1, doc.blocks[0].numeral + output = doc.convert assert_xpath '//*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 - assert_xpath '//*[@class="imageblock"]/*[@class="title"][text() = "Figure 1. The AsciiDoc Tiger"]', output, 1 + assert_xpath '//*[@class="imageblock"]/*[@class="title"][text()="Figure 1. The AsciiDoc Tiger"]', output, 1 assert_equal 1, doc.attributes['figure-number'] end - test 'can render block image with explicit caption' do - input = <<-EOS -[caption="Voila! "] -.The AsciiDoc Tiger -image::images/tiger.png[Tiger] + test 'can convert block image with explicit caption' do + input = <<~'EOS' + [caption="Voila! "] + .The AsciiDoc Tiger + image::images/tiger.png[Tiger] EOS doc = document_from_string input - output = doc.render + assert_nil doc.blocks[0].numeral + output = doc.convert assert_xpath '//*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 - assert_xpath '//*[@class="imageblock"]/*[@class="title"][text() = "Voila! The AsciiDoc Tiger"]', output, 1 - assert !doc.attributes.has_key?('figure-number') + assert_xpath '//*[@class="imageblock"]/*[@class="title"][text()="Voila! 
The AsciiDoc Tiger"]', output, 1 + refute doc.attributes.has_key?('figure-number') end test 'can align image in DocBook backend' do - input = <<-EOS -image::images/sunset.jpg[Sunset, align="right"] - EOS - - output = render_embedded_string input, :backend => :docbook + input = 'image::images/sunset.jpg[Sunset,align=right]' + output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@align="right"]', output, 1 end - test 'can scale image in DocBook backend' do - input = <<-EOS -image::images/sunset.jpg[Sunset, scale="200"] - EOS + test 'should set content width and depth in DocBook backend if no scaling' do + input = 'image::images/sunset.jpg[Sunset,500,332]' + output = convert_string_to_embedded input, backend: :docbook + assert_xpath '//imagedata', output, 1 + assert_xpath '//imagedata[@contentwidth="500"]', output, 1 + assert_xpath '//imagedata[@contentdepth="332"]', output, 1 + assert_xpath '//imagedata[@width]', output, 0 + assert_xpath '//imagedata[@depth]', output, 0 + end - output = render_embedded_string input, :backend => :docbook + test 'can scale image in DocBook backend' do + input = 'image::images/sunset.jpg[Sunset,500,332,scale=200]' + output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@scale="200"]', output, 1 + assert_xpath '//imagedata[@width]', output, 0 + assert_xpath '//imagedata[@depth]', output, 0 + assert_xpath '//imagedata[@contentwidth]', output, 0 + assert_xpath '//imagedata[@contentdepth]', output, 0 end - test 'can scale image width in DocBook backend' do - input = <<-EOS -image::images/sunset.jpg[Sunset, scaledwidth="25%"] - EOS - - output = render_embedded_string input, :backend => :docbook + test 'scale image width in DocBook backend' do + input = 'image::images/sunset.jpg[Sunset,500,332,scaledwidth=25%]' + output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@width="25%"]', output, 1 - assert_xpath '//imagedata[@scalefit="1"]', output, 1 + assert_xpath '//imagedata[@depth]', output, 0 + assert_xpath '//imagedata[@contentwidth]', output, 0 + assert_xpath '//imagedata[@contentdepth]', output, 0 end test 'adds % to scaled width if no units given in DocBook backend ' do - input = <<-EOS -image::images/sunset.jpg[Sunset, scaledwidth="25"] - EOS - - output = render_embedded_string input, :backend => :docbook + input = 'image::images/sunset.jpg[Sunset,scaledwidth=25]' + output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@width="25%"]', output, 1 - assert_xpath '//imagedata[@scalefit="1"]', output, 1 end - test 'keeps line unprocessed if image target is missing attribute reference and attribute-missing is skip' do - input = <<-EOS -:attribute-missing: skip + test 'keeps attribute reference unprocessed if image target is missing attribute reference and attribute-missing is skip' do + input = <<~'EOS' + :attribute-missing: skip -image::{bogus}[] + image::{bogus}[] EOS - output = render_embedded_string input - assert output.include?('image::{bogus}[]') + output = convert_string_to_embedded input + assert_css 'img[src="{bogus}"]', output, 1 + assert_empty @logger end - test 'drops line if image target is missing attribute reference and attribute-missing is drop' do - input = <<-EOS -:attribute-missing: drop + test 'should not drop line if image target is missing attribute reference and 
attribute-missing is drop' do + input = <<~'EOS' + :attribute-missing: drop -image::{bogus}[] + image::{bogus}/photo.jpg[] EOS - output = render_embedded_string input - assert output.strip.empty? + output = convert_string_to_embedded input + assert_css 'img[src="/photo.jpg"]', output, 1 + assert_empty @logger end test 'drops line if image target is missing attribute reference and attribute-missing is drop-line' do - input = <<-EOS -:attribute-missing: drop-line + input = <<~'EOS' + :attribute-missing: drop-line -image::{bogus}[] + image::{bogus}[] EOS - output = render_embedded_string input - assert output.strip.empty? + output = convert_string_to_embedded input + assert_empty output.strip + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: bogus' + end + + test 'should not drop line if image target resolves to blank and attribute-missing is drop-line' do + input = <<~'EOS' + :attribute-missing: drop-line + + image::{blank}[] + EOS + + output = convert_string_to_embedded input + assert_css 'img[src=""]', output, 1 + assert_empty @logger end test 'dropped image does not break processing of following section and attribute-missing is drop-line' do - input = <<-EOS -:attribute-missing: drop-line + input = <<~'EOS' + :attribute-missing: drop-line -image::{bogus}[] + image::{bogus}[] -== Section Title + == Section Title EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'img', output, 0 assert_css 'h2', output, 1 - assert !output.include?('== Section Title') + refute_includes output, '== Section Title' + assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: bogus' end test 'should pass through image that references uri' do - input = <<-EOS -:imagesdir: images + input = <<~'EOS' + :imagesdir: images -image::http://asciidoc.org/images/tiger.png[Tiger] + image::http://asciidoc.org/images/tiger.png[Tiger] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="http://asciidoc.org/images/tiger.png"][@alt="Tiger"]', output, 1 end + test 'should encode spaces in image target if value is a URI' do + input = 'image::http://example.org/svg?digraph=digraph G { a -> b; }[diagram]' + output = convert_string_to_embedded input + assert_xpath %(/*[@class="imageblock"]//img[@src="http://example.org/svg?digraph=digraph%20G%20{%20a%20-#{decode_char 62}%20b;%20}"]), output, 1 + end + test 'can resolve image relative to imagesdir' do - input = <<-EOS -:imagesdir: images + input = <<~'EOS' + :imagesdir: images -image::tiger.png[Tiger] + image::tiger.png[Tiger] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'embeds base64-encoded data uri for image when data-uri attribute is set' do - input = <<-EOS -:data-uri: -:imagesdir: fixtures + input = <<~'EOS' + :data-uri: + :imagesdir: fixtures -image::dot.gif[Dot] + image::dot.gif[Dot] EOS - doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} + doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal 'fixtures', doc.attributes['imagesdir'] - output = doc.render + output = doc.convert assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 
end + test 'embeds SVG image with image/svg+xml mimetype when file extension is .svg' do + input = <<~'EOS' + :imagesdir: fixtures + :data-uri: + + image::circle.svg[Tiger,100] + EOS + + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } + assert_xpath '//img[starts-with(@src,"data:image/svg+xml;base64,")]', output, 1 + end + + test 'embeds empty base64-encoded data uri for unreadable image when data-uri attribute is set' do + input = <<~'EOS' + :data-uri: + :imagesdir: fixtures + + image::unreadable.gif[Dot] + EOS + + doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } + assert_equal 'fixtures', doc.attributes['imagesdir'] + output = doc.convert + assert_xpath '//img[@src="data:image/gif;base64,"]', output, 1 + assert_message @logger, :WARN, '~image to embed not found or not readable' + end + + test 'embeds base64-encoded data uri with application/octet-stream mimetype when file extension is missing' do + input = <<~'EOS' + :data-uri: + :imagesdir: fixtures + + image::dot[Dot] + EOS + + doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } + assert_equal 'fixtures', doc.attributes['imagesdir'] + output = doc.convert + assert_xpath '//img[starts-with(@src,"data:application/octet-stream;base64,")]', output, 1 + end + test 'embeds base64-encoded data uri for remote image when data-uri attribute is set' do - input = <<-EOS -:data-uri: + input = <<~EOS + :data-uri: -image::http://#{resolve_localhost}:9876/fixtures/dot.gif[Dot] + image::http://#{resolve_localhost}:9876/fixtures/dot.gif[Dot] EOS output = using_test_webserver do - render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'embeds base64-encoded data uri for remote image when imagesdir is a URI and data-uri attribute is set' do - input = <<-EOS -:data-uri: -:imagesdir: http://#{resolve_localhost}:9876/fixtures + input = <<~EOS + :data-uri: + :imagesdir: http://#{resolve_localhost}:9876/fixtures -image::dot.gif[Dot] + image::dot.gif[Dot] EOS output = using_test_webserver do - render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 @@ -1811,105 +2596,98 @@ test 'uses remote image uri when data-uri attribute is set and image cannot be retrieved' do image_uri = "http://#{resolve_localhost}:9876/fixtures/missing-image.gif" - input = <<-EOS -:data-uri: + input = <<~EOS + :data-uri: -image::#{image_uri}[Missing image] + image::#{image_uri}[Missing image] EOS output = using_test_webserver do - render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_xpath %(/*[@class="imageblock"]//img[@src="#{image_uri}"][@alt="Missing image"]), output, 1 + assert_message @logger, :WARN, '~could not retrieve image data from URI' end test 'uses remote image uri when data-uri attribute is set and allow-uri-read is not set' do image_uri = 
"http://#{resolve_localhost}:9876/fixtures/dot.gif" - input = <<-EOS -:data-uri: + input = <<~EOS + :data-uri: -image::#{image_uri}[Dot] + image::#{image_uri}[Dot] EOS output = using_test_webserver do - render_embedded_string input, :safe => :safe + convert_string_to_embedded input, safe: :safe end assert_xpath %(/*[@class="imageblock"]//img[@src="#{image_uri}"][@alt="Dot"]), output, 1 end test 'can handle embedded data uri images' do - input = <<-EOS -image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot] - EOS - - output = render_embedded_string input + input = 'image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot]' + output = convert_string_to_embedded input assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'can handle embedded data uri images when data-uri attribute is set' do - input = <<-EOS -:data-uri: + input = <<~'EOS' + :data-uri: -image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot] + image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end - # this test will cause a warning to be printed to the console (until we have a message facility) test 'cleans reference to ancestor directories in imagesdir before reading image if safe mode level is at least SAFE' do - input = <<-EOS -:data-uri: -:imagesdir: ../..//fixtures/./../../fixtures + input = <<~'EOS' + :data-uri: + :imagesdir: ../..//fixtures/./../../fixtures -image::dot.gif[Dot] + image::dot.gif[Dot] EOS - doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} + doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal '../..//fixtures/./../../fixtures', doc.attributes['imagesdir'] - output = doc.render + output = doc.convert # image target resolves to fixtures/dot.gif relative to docdir (which is explicitly set to the directory of this file) # the reference cannot fall outside of the document directory in safe mode assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 + assert_message @logger, :WARN, 'image has illegal reference to ancestor of jail; recovering automatically' end test 'cleans reference to ancestor directories in target before reading image if safe mode level is at least SAFE' do - input = <<-EOS -:data-uri: -:imagesdir: ./ + input = <<~'EOS' + :data-uri: + :imagesdir: ./ -image::../..//fixtures/./../../fixtures/dot.gif[Dot] + image::../..//fixtures/./../../fixtures/dot.gif[Dot] EOS - doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} + doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal './', doc.attributes['imagesdir'] - output = doc.render + output = doc.convert # image target resolves to fixtures/dot.gif relative to docdir (which is explicitly set to the directory of this file) # the reference cannot fall outside of the document directory in safe mode assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 + 
assert_message @logger, :WARN, 'image has illegal reference to ancestor of jail; recovering automatically' end end context 'Media' do - test 'should detect and render video macro' do - input = <<-EOS -video::cats-vs-dogs.avi[] - EOS - - output = render_embedded_string input + test 'should detect and convert video macro' do + input = 'video::cats-vs-dogs.avi[]' + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 end - test 'should detect and render video macro with positional attributes for poster and dimensions' do - input = <<-EOS -video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300] - EOS - - output = render_embedded_string input + test 'should detect and convert video macro with positional attributes for poster and dimensions' do + input = 'video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300]' + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 assert_css 'video[poster="cats-and-dogs.png"]', output, 1 @@ -1917,56 +2695,61 @@ assert_css 'video[height="300"]', output, 1 end - test 'video macro should honor all options' do - input = <<-EOS -video::cats-vs-dogs.avi[options="autoplay,nocontrols,loop"] - EOS + test 'should set direction CSS class on video block if float attribute is set' do + input = 'video::cats-vs-dogs.avi[cats-and-dogs.png,float=right]' + output = convert_string_to_embedded input + assert_css 'video', output, 1 + assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 + assert_css '.videoblock.right', output, 1 + end + + test 'should set text alignment CSS class on video block if align attribute is set' do + input = 'video::cats-vs-dogs.avi[cats-and-dogs.png,align=center]' + output = convert_string_to_embedded input + assert_css 'video', output, 1 + assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 + assert_css '.videoblock.text-center', output, 1 + end - output = render_embedded_string input + test 'video macro should honor all options' do + input = 'video::cats-vs-dogs.avi[options="autoplay,nocontrols,loop",preload="metadata"]' + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[autoplay]', output, 1 assert_css 'video:not([controls])', output, 1 assert_css 'video[loop]', output, 1 + assert_css 'video[preload=metadata]', output, 1 end test 'video macro should add time range anchor with start time if start attribute is set' do - input = <<-EOS -video::cats-vs-dogs.avi[start="30"] - EOS - - output = render_embedded_string input + input = 'video::cats-vs-dogs.avi[start="30"]' + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=30"]', output, 1 end test 'video macro should add time range anchor with end time if end attribute is set' do - input = <<-EOS -video::cats-vs-dogs.avi[end="30"] - EOS - - output = render_embedded_string input + input = 'video::cats-vs-dogs.avi[end="30"]' + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=,30"]', output, 1 end test 'video macro should add time range anchor with start and end time if start and end attributes are set' do - input = <<-EOS -video::cats-vs-dogs.avi[start="30",end="60"] - EOS - - output = render_embedded_string input + input = 'video::cats-vs-dogs.avi[start="30",end="60"]' + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_xpath 
'//video[@src="cats-vs-dogs.avi#t=30,60"]', output, 1 end test 'video macro should use imagesdir attribute to resolve target and poster' do - input = <<-EOS -:imagesdir: assets + input = <<~'EOS' + :imagesdir: assets -video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300] + video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="assets/cats-vs-dogs.avi"]', output, 1 assert_css 'video[poster="assets/cats-and-dogs.png"]', output, 1 @@ -1975,46 +2758,40 @@ end test 'video macro should not use imagesdir attribute to resolve target if target is a URL' do - input = <<-EOS -:imagesdir: assets + input = <<~'EOS' + :imagesdir: assets -video::http://example.org/videos/cats-vs-dogs.avi[] + video::http://example.org/videos/cats-vs-dogs.avi[] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="http://example.org/videos/cats-vs-dogs.avi"]', output, 1 end test 'video macro should output custom HTML with iframe for vimeo service' do - input = <<-EOS -video::67480300[vimeo, 400, 300, start=60, options=autoplay] - EOS - output = render_embedded_string input + input = 'video::67480300[vimeo, 400, 300, start=60, options="autoplay,muted"]' + output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 - assert_css 'iframe[src="https://player.vimeo.com/video/67480300#at=60?autoplay=1"]', output, 1 + assert_css 'iframe[src="https://player.vimeo.com/video/67480300?autoplay=1&muted=1#at=60"]', output, 1 assert_css 'iframe[width="400"]', output, 1 assert_css 'iframe[height="300"]', output, 1 end test 'video macro should output custom HTML with iframe for youtube service' do - input = <<-EOS -video::U8GBXvdmHT4/PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe[youtube, 640, 360, start=60, options="autoplay,modest", theme=light] - EOS - output = render_embedded_string input + input = 'video::U8GBXvdmHT4/PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe[youtube, 640, 360, start=60, options="autoplay,muted,modest", theme=light]' + output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 - assert_css 'iframe[src="https://www.youtube.com/embed/U8GBXvdmHT4?rel=0&start=60&autoplay=1&list=PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe&modestbranding=1&theme=light"]', output, 1 + assert_css 'iframe[src="https://www.youtube.com/embed/U8GBXvdmHT4?rel=0&start=60&autoplay=1&mute=1&list=PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe&modestbranding=1&theme=light"]', output, 1 assert_css 'iframe[width="640"]', output, 1 assert_css 'iframe[height="360"]', output, 1 end test 'video macro should output custom HTML with iframe for youtube service with dynamic playlist' do - input = <<-EOS -video::SCZF6I-Rc4I,AsKGOeonbIs,HwrPhOp6-aM[youtube, 640, 360, start=60, options=autoplay] - EOS - output = render_embedded_string input + input = 'video::SCZF6I-Rc4I,AsKGOeonbIs,HwrPhOp6-aM[youtube, 640, 360, start=60, options=autoplay]' + output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://www.youtube.com/embed/SCZF6I-Rc4I?rel=0&start=60&autoplay=1&playlist=AsKGOeonbIs,HwrPhOp6-aM"]', output, 1 @@ -2022,207 +2799,253 @@ assert_css 'iframe[height="360"]', output, 1 end - test 'should detect and render audio macro' do - input = <<-EOS -audio::podcast.mp3[] - EOS - - output = render_embedded_string input + test 
'should detect and convert audio macro' do + input = 'audio::podcast.mp3[]' + output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[src="podcast.mp3"]', output, 1 end test 'audio macro should use imagesdir attribute to resolve target' do - input = <<-EOS -:imagesdir: assets + input = <<~'EOS' + :imagesdir: assets -audio::podcast.mp3[] + audio::podcast.mp3[] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[src="assets/podcast.mp3"]', output, 1 end test 'audio macro should not use imagesdir attribute to resolve target if target is a URL' do - input = <<-EOS -:imagesdir: assets + input = <<~'EOS' + :imagesdir: assets -video::http://example.org/podcast.mp3[] + video::http://example.org/podcast.mp3[] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="http://example.org/podcast.mp3"]', output, 1 end test 'audio macro should honor all options' do - input = <<-EOS -audio::podcast.mp3[options="autoplay,nocontrols,loop"] - EOS - - output = render_embedded_string input + input = 'audio::podcast.mp3[options="autoplay,nocontrols,loop"]' + output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[autoplay]', output, 1 assert_css 'audio:not([controls])', output, 1 assert_css 'audio[loop]', output, 1 end + + test 'audio macro should support start and end time' do + input = 'audio::podcast.mp3[start=1,end=2]' + output = convert_string_to_embedded input + assert_css 'audio', output, 1 + assert_css 'audio[controls]', output, 1 + assert_css 'audio[src="podcast.mp3#t=1,2"]', output, 1 + end end context 'Admonition icons' do test 'can resolve icon relative to default iconsdir' do - input = <<-EOS -:icons: + input = <<~'EOS' + :icons: -[TIP] -You can use icons for admonitions by setting the 'icons' attribute. + [TIP] + You can use icons for admonitions by setting the 'icons' attribute. EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="./images/icons/tip.png"][@alt="Tip"]', output, 1 end test 'can resolve icon relative to custom iconsdir' do - input = <<-EOS -:icons: -:iconsdir: icons + input = <<~'EOS' + :icons: + :iconsdir: icons -[TIP] -You can use icons for admonitions by setting the 'icons' attribute. + [TIP] + You can use icons for admonitions by setting the 'icons' attribute. 
EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="icons/tip.png"][@alt="Tip"]', output, 1 end test 'should add file extension to custom icon if not specified' do - input = <<-EOS -:icons: font -:iconsdir: images/icons + input = <<~'EOS' + :icons: font + :iconsdir: images/icons -[TIP,icon=a] -Override the icon of an admonition block using an attribute + [TIP,icon=a] + Override the icon of an admonition block using an attribute EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="images/icons/a.png"]', output, 1 end + test 'should allow icontype to be specified when using built-in admonition icon' do + input = 'TIP: Set the icontype using either the icontype attribute on the icons attribute.' + [ + { 'icons' => '', 'ext' => 'png' }, + { 'icons' => '', 'icontype' => 'jpg', 'ext' => 'jpg' }, + { 'icons' => 'jpg', 'ext' => 'jpg' }, + { 'icons' => 'image', 'ext' => 'png' }, + ].each do |attributes| + expected_src = %(./images/icons/tip.#{attributes.delete 'ext'}) + output = convert_string input, attributes: attributes + assert_xpath %(//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="#{expected_src}"]), output, 1 + end + end + + test 'should allow icontype to be specified when using custom admonition icon' do + input = <<~'EOS' + [TIP,icon=hint] + Set the icontype using either the icontype attribute on the icons attribute. + EOS + [ + { 'icons' => '', 'ext' => 'png' }, + { 'icons' => '', 'icontype' => 'jpg', 'ext' => 'jpg' }, + { 'icons' => 'jpg', 'ext' => 'jpg' }, + { 'icons' => 'image', 'ext' => 'png' }, + ].each do |attributes| + expected_src = %(./images/icons/hint.#{attributes.delete 'ext'}) + output = convert_string input, attributes: attributes + assert_xpath %(//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="#{expected_src}"]), output, 1 + end + end + test 'embeds base64-encoded data uri of icon when data-uri attribute is set and safe mode level is less than SECURE' do - input = <<-EOS -:icons: -:iconsdir: fixtures -:icontype: gif -:data-uri: + input = <<~'EOS' + :icons: + :iconsdir: fixtures + :icontype: gif + :data-uri: + + [TIP] + You can use icons for admonitions by setting the 'icons' attribute. + EOS + + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } + assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 + end -[TIP] -You can use icons for admonitions by setting the 'icons' attribute. + test 'should embed base64-encoded data uri of custom icon when data-uri attribute is set' do + input = <<~'EOS' + :icons: + :iconsdir: fixtures + :icontype: gif + :data-uri: + + [TIP,icon=tip] + You can set a custom icon using the icon attribute on the block. 
EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 end test 'does not embed base64-encoded data uri of icon when safe mode level is SECURE or greater' do - input = <<-EOS -:icons: -:iconsdir: fixtures -:icontype: gif -:data-uri: + input = <<~'EOS' + :icons: + :iconsdir: fixtures + :icontype: gif + :data-uri: -[TIP] -You can use icons for admonitions by setting the 'icons' attribute. + [TIP] + You can use icons for admonitions by setting the 'icons' attribute. EOS - output = render_string input, :attributes => {'icons' => ''} + output = convert_string input, attributes: { 'icons' => '' } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="fixtures/tip.gif"][@alt="Tip"]', output, 1 end test 'cleans reference to ancestor directories before reading icon if safe mode level is at least SAFE' do - input = <<-EOS -:icons: -:iconsdir: ../fixtures -:icontype: gif -:data-uri: + input = <<~'EOS' + :icons: + :iconsdir: ../fixtures + :icontype: gif + :data-uri: -[TIP] -You can use icons for admonitions by setting the 'icons' attribute. + [TIP] + You can use icons for admonitions by setting the 'icons' attribute. EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 + assert_message @logger, :WARN, 'image has illegal reference to ancestor of jail; recovering automatically' end test 'should import Font Awesome and use font-based icons when value of icons attribute is font' do - input = <<-EOS -:icons: font + input = <<~'EOS' + :icons: font -[TIP] -You can use icons for admonitions by setting the 'icons' attribute. + [TIP] + You can use icons for admonitions by setting the 'icons' attribute. 
EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SERVER - assert_css 'html > head > link[rel="stylesheet"][href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.min.css"]', output, 1 + output = convert_string input, safe: Asciidoctor::SafeMode::SERVER + assert_css %(html > head > link[rel="stylesheet"][href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/#{Asciidoctor::FONT_AWESOME_VERSION}/css/font-awesome.min.css"]), output, 1 assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/i[@class="fa icon-tip"]', output, 1 end test 'font-based icon should not override icon specified on admonition' do - input = <<-EOS -:icons: font -:iconsdir: images/icons + input = <<~'EOS' + :icons: font + :iconsdir: images/icons -[TIP,icon=a.png] -Override the icon of an admonition block using an attribute + [TIP,icon=a.png] + Override the icon of an admonition block using an attribute EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SERVER + output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/i[@class="fa icon-tip"]', output, 0 assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="images/icons/a.png"]', output, 1 end test 'should use http uri scheme for assets when asset-uri-scheme is http' do - input = <<-EOS -:asset-uri-scheme: http -:icons: font -:source-highlighter: highlightjs + input = <<~'EOS' + :asset-uri-scheme: http + :icons: font + :source-highlighter: highlightjs -TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute + TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute -[source,ruby] -puts "AsciiDoc, FTW!" + [source,ruby] + puts "AsciiDoc, FTW!" EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE - assert_css 'html > head > link[rel="stylesheet"][href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.min.css"]', output, 1 - assert_css 'html > body > script[src="http://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js"]', output, 1 + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_css %(html > head > link[rel="stylesheet"][href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/#{Asciidoctor::FONT_AWESOME_VERSION}/css/font-awesome.min.css"]), output, 1 + assert_css %(html > body > script[src="http://cdnjs.cloudflare.com/ajax/libs/highlight.js/#{Asciidoctor::HIGHLIGHT_JS_VERSION}/highlight.min.js"]), output, 1 end test 'should use no uri scheme for assets when asset-uri-scheme is blank' do - input = <<-EOS -:asset-uri-scheme: -:icons: font -:source-highlighter: highlightjs + input = <<~'EOS' + :asset-uri-scheme: + :icons: font + :source-highlighter: highlightjs -TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute + TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute -[source,ruby] -puts "AsciiDoc, FTW!" + [source,ruby] + puts "AsciiDoc, FTW!" 
EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE - assert_css 'html > head > link[rel="stylesheet"][href="//cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.min.css"]', output, 1 - assert_css 'html > body > script[src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js"]', output, 1 + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_css %(html > head > link[rel="stylesheet"][href="//cdnjs.cloudflare.com/ajax/libs/font-awesome/#{Asciidoctor::FONT_AWESOME_VERSION}/css/font-awesome.min.css"]), output, 1 + assert_css %(html > body > script[src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/#{Asciidoctor::HIGHLIGHT_JS_VERSION}/highlight.min.js"]), output, 1 end end context 'Image paths' do - test 'restricts access to ancestor directories when safe mode level is at least SAFE' do - input = <<-EOS -image::asciidoctor.png[Asciidoctor] - EOS - basedir = File.expand_path File.dirname(__FILE__) - block = block_from_string input, :attributes => {'docdir' => basedir} + input = 'image::asciidoctor.png[Asciidoctor]' + basedir = testdir + block = block_from_string input, attributes: { 'docdir' => basedir } doc = block.document assert doc.safe >= Asciidoctor::SafeMode::SAFE @@ -2232,11 +3055,9 @@ end test 'does not restrict access to ancestor directories when safe mode is disabled' do - input = <<-EOS -image::asciidoctor.png[Asciidoctor] - EOS - basedir = File.expand_path File.dirname(__FILE__) - block = block_from_string input, :safe => Asciidoctor::SafeMode::UNSAFE, :attributes => {'docdir' => basedir} + input = 'image::asciidoctor.png[Asciidoctor]' + basedir = testdir + block = block_from_string input, safe: Asciidoctor::SafeMode::UNSAFE, attributes: { 'docdir' => basedir } doc = block.document assert doc.safe == Asciidoctor::SafeMode::UNSAFE @@ -2250,302 +3071,86 @@ context 'Source code' do test 'should support fenced code block using backticks' do - input = <<-EOS -``` -puts "Hello, World!" -``` + input = <<~'EOS' + ``` + puts "Hello, World!" + ``` EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.listingblock', output, 1 assert_css '.listingblock pre code', output, 1 assert_css '.listingblock pre code:not([class])', output, 1 end test 'should not recognize fenced code blocks with more than three delimiters' do - input = <<-EOS -````ruby -puts "Hello, World!" -```` + input = <<~'EOS' + ````ruby + puts "Hello, World!" + ```` -~~~~ javascript -alert("Hello, World!") -~~~~ + ~~~~ javascript + alert("Hello, World!") + ~~~~ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.listingblock', output, 0 end test 'should support fenced code blocks with languages' do - input = <<-EOS -```ruby -puts "Hello, World!" -``` + input = <<~'EOS' + ```ruby + puts "Hello, World!" + ``` -``` javascript -alert("Hello, World!") -``` + ``` javascript + alert("Hello, World!") + ``` EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.listingblock', output, 2 assert_css '.listingblock pre code.language-ruby[data-lang=ruby]', output, 1 assert_css '.listingblock pre code.language-javascript[data-lang=javascript]', output, 1 end test 'should support fenced code blocks with languages and numbering' do - input = <<-EOS -```ruby,numbered -puts "Hello, World!" -``` + input = <<~'EOS' + ```ruby,numbered + puts "Hello, World!" 
+ ``` -``` javascript, numbered -alert("Hello, World!") -``` + ``` javascript, numbered + alert("Hello, World!") + ``` EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.listingblock', output, 2 assert_css '.listingblock pre code.language-ruby[data-lang=ruby]', output, 1 assert_css '.listingblock pre code.language-javascript[data-lang=javascript]', output, 1 end - - test 'should highlight source if source-highlighter attribute is coderay' do - input = <<-EOS -:source-highlighter: coderay - -[source, ruby] ----- -require 'coderay' - -html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ----- - EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :linkcss_default => true - assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 - assert_match(/\.CodeRay *\{/, output) - end - - test 'should read source language from source-language document attribute if not specified on source block' do - input = <<-EOS -:source-highlighter: coderay -:source-language: ruby - -[source] ----- -require 'coderay' - -html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ----- - EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE, :linkcss_default => true - assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 - end - - test 'should rename document attribute named language to source-language when compat-mode is enabled' do - input = <<-EOS -:language: ruby - -{source-language} - EOS - - assert_equal 'ruby', render_string(input, :doctype => :inline, :attributes => {'compat-mode' => ''}) - - input = <<-EOS -:language: ruby - -{source-language} - EOS - - assert_equal '{source-language}', render_string(input, :doctype => :inline) - end - - test 'should replace callout marks but not highlight them if source-highlighter attribute is coderay' do - input = <<-EOS -:source-highlighter: coderay - -[source, ruby] ----- -require 'coderay' # <1> - -html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) # <2> -puts html # <3> <4> -exit 0 # <5><6> ----- -<1> Load library -<2> Highlight source -<3> Print to stdout -<4> Redirect to a file to capture output -<5> Exit program -<6> Reports success - EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE - assert_match(/coderay<\/span>.* \(1\)<\/b>$/, output) - assert_match(/puts 'Hello, world!'<\/span>.* \(2\)<\/b>$/, output) - assert_match(/puts html * \(3\)<\/b> \(4\)<\/b>$/, output) - assert_match(/exit.* \(5\)<\/b> \(6\)<\/b><\/code>/, output) - end - - test 'should restore callout marks to correct lines if source highlighter is coderay and table line numbering is enabled' do - input = <<-EOS -:source-highlighter: coderay -:coderay-linenums-mode: table - -[source, ruby, numbered] ----- -require 'coderay' # <1> - -html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) # <2> -puts html # <3> <4> -exit 0 # <5><6> ----- -<1> Load library -<2> Highlight source -<3> Print to stdout -<4> Redirect to a file to capture output -<5> Exit program -<6> Reports success - EOS - output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE - assert_match(/coderay<\/span>.* \(1\)<\/b>$/, output) - assert_match(/puts 'Hello, world!'<\/span>.* \(2\)<\/b>$/, output) - assert_match(/puts html * \(3\)<\/b> 
\(4\)<\/b>$/, output) - assert_match(/exit.* \(5\)<\/b> \(6\)<\/b><\/pre>/, output) - end - - test 'should preserve passthrough placeholders when highlighting source using coderay' do - input = <<-EOS -:source-highlighter: coderay - -[source,java] -[subs="specialcharacters,macros,callouts"] ----- -public class Printer { - public static void main(String[] args) { - System.pass:quotes[_out_].println("*asterisks* make text pass:quotes[*bold*]"); - } -} ----- - EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE - assert_match(/\.out<\/em>\./, output, 1) - assert_match(/\*asterisks\*/, output, 1) - assert_match(/bold<\/strong>/, output, 1) - assert !output.include?(Asciidoctor::Substitutors::PASS_START) - end - - test 'should link to CodeRay stylesheet if source-highlighter is coderay and linkcss is set' do - input = <<-EOS -:source-highlighter: coderay - -[source, ruby] ----- -require 'coderay' - -html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ----- - EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'linkcss' => ''} - assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 - assert_css 'link[rel="stylesheet"][href="./coderay-asciidoctor.css"]', output, 1 - end - - test 'should highlight source inline if source-highlighter attribute is coderay and coderay-css is style' do - input = <<-EOS -:source-highlighter: coderay -:coderay-css: style - -[source, ruby] ----- -require 'coderay' - -html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ----- - EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :linkcss_default => true - assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@style = "color:#036;font-weight:bold"][text() = "CodeRay"]', output, 1 - refute_match(/\.CodeRay \{/, output) - end - - test 'should include remote highlight.js assets if source-highlighter attribute is highlightjs' do - input = <<-EOS -:source-highlighter: highlightjs - -[source, javascript] ----- - - - ----- - EOS - output = render_string input, :safe => Asciidoctor::SafeMode::SAFE - assert_match(/ {'source-highlighter' => 'prettify'} - assert_css 'pre[class="prettyprint highlight"]', output, 1 - assert_css 'pre > code.language-ruby[data-lang="ruby"]', output, 1 - end - - test 'should set lang attribute on pre when source-highlighter is html-pipeline' do - input = <<-EOS -[source,ruby] ----- -filters = [ - HTML::Pipeline::AsciiDocFilter, - HTML::Pipeline::SanitizationFilter, - HTML::Pipeline::SyntaxHighlightFilter -] - -puts HTML::Pipeline.new(filters, {}).call(input)[:output] ----- - EOS - - output = render_string input, :attributes => {'source-highlighter' => 'html-pipeline'} - assert_css 'pre[lang="ruby"]', output, 1 - assert_css 'pre[lang="ruby"] > code', output, 1 - assert_css 'pre[class]', output, 0 - assert_css 'code[class]', output, 0 - end - - test 'document cannot turn on source highlighting if safe mode is at least SERVER' do - input = <<-EOS -:source-highlighter: coderay - EOS - doc = document_from_string input, :safe => Asciidoctor::SafeMode::SERVER - assert doc.attributes['source-highlighter'].nil? - end end context 'Abstract and Part Intro' do test 'should make abstract on open block without title a quote block for article' do - input = <<-EOS -= Article + input = <<~'EOS' + = Article -[abstract] --- -This article is about stuff. 
+ [abstract] + -- + This article is about stuff. -And other stuff. --- + And other stuff. + -- -== Section One + == Section One -content + content EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock.abstract', output, 1 assert_css '#preamble .quoteblock', output, 1 @@ -2554,21 +3159,21 @@ end test 'should make abstract on open block with title a quote block with title for article' do - input = <<-EOS -= Article + input = <<~'EOS' + = Article -.My abstract -[abstract] --- -This article is about stuff. --- + .My abstract + [abstract] + -- + This article is about stuff. + -- -== Section One + == Section One -content + content EOS - output = render_string input + output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock.abstract', output, 1 assert_css '#preamble .quoteblock', output, 1 @@ -2578,124 +3183,112 @@ end test 'should allow abstract in document with title if doctype is book' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -[abstract] -Abstract for book with title is valid + [abstract] + Abstract for book with title is valid EOS - output = render_string input + output = convert_string input assert_css '.abstract', output, 1 end test 'should not allow abstract as direct child of document if doctype is book' do - input = <<-EOS -:doctype: book + input = <<~'EOS' + :doctype: book -[abstract] -Abstract for book without title is invalid. + [abstract] + Abstract for book without title is invalid. EOS - warnings = nil - output = nil - redirect_streams do |stdout, stderr| - output = render_string input - warnings = stderr.string - end + output = convert_string input assert_css '.abstract', output, 0 - refute_nil warnings - assert_match(/WARNING:.*abstract block/, warnings) + assert_message @logger, :WARN, 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' end - test 'should make abstract on open block without title rendered to DocBook' do - input = <<-EOS -= Article + test 'should make abstract on open block without title converted to DocBook' do + input = <<~'EOS' + = Article -[abstract] --- -This article is about stuff. + [abstract] + -- + This article is about stuff. -And other stuff. --- + And other stuff. + -- EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 1 assert_css 'abstract > simpara', output, 2 end - test 'should make abstract on open block with title rendered to DocBook' do - input = <<-EOS -= Article - -.My abstract -[abstract] --- -This article is about stuff. --- + test 'should make abstract on open block with title converted to DocBook' do + input = <<~'EOS' + = Article + + .My abstract + [abstract] + -- + This article is about stuff. 
+ -- EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 1 assert_css 'abstract > title', output, 1 assert_css 'abstract > title + simpara', output, 1 end - test 'should allow abstract in document with title if doctype is book rendered to DocBook' do - input = <<-EOS -= Book -:doctype: book + test 'should allow abstract in document with title if doctype is book converted to DocBook' do + input = <<~'EOS' + = Book + :doctype: book -[abstract] -Abstract for book with title is valid + [abstract] + Abstract for book with title is valid EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 1 end - test 'should not allow abstract as direct child of document if doctype is book rendered to DocBook' do - input = <<-EOS -:doctype: book - -[abstract] -Abstract for book is invalid. + test 'should not allow abstract as direct child of document if doctype is book converted to DocBook' do + input = <<~'EOS' + :doctype: book + + [abstract] + Abstract for book is invalid. EOS - output = nil - warnings = nil - redirect_streams do |stdout, stderr| - output = render_string input, :backend => 'docbook' - warnings = stderr.string - end + output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 0 - refute_nil warnings - assert_match(/WARNING:.*abstract block/, warnings) + assert_message @logger, :WARN, 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' end # TODO partintro shouldn't be recognized if doctype is not book, should be in proper place test 'should accept partintro on open block without title' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 -[partintro] --- -This is a part intro. + [partintro] + -- + This is a part intro. -It can have multiple paragraphs. --- + It can have multiple paragraphs. + -- -== Chapter 1 + == Chapter 1 -content + content EOS - output = render_string input + output = convert_string input assert_css '.openblock', output, 1 assert_css '.openblock.partintro', output, 1 assert_css '.openblock .title', output, 0 @@ -2705,139 +3298,156 @@ end test 'should accept partintro on open block with title' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 -.Intro title -[partintro] --- -This is a part intro with a title. --- + .Intro title + [partintro] + -- + This is a part intro with a title. 
+ -- -== Chapter 1 + == Chapter 1 -content + content EOS - output = render_string input + output = convert_string input assert_css '.openblock', output, 1 assert_css '.openblock.partintro', output, 1 assert_css '.openblock .title', output, 1 assert_css '.openblock .content', output, 1 assert_xpath %(//h1[@id="_part_1"]/following-sibling::*[#{contains_class(:openblock)}]), output, 1 - assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="title"][text() = "Intro title"]), output, 1 + assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="title"][text()="Intro title"]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="content"]/*[@class="paragraph"]), output, 1 end test 'should exclude partintro if not a child of part' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -[partintro] -part intro paragraph + [partintro] + part intro paragraph EOS - output = render_string input + output = convert_string input assert_css '.partintro', output, 0 + assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end test 'should not allow partintro unless doctype is book' do - input = <<-EOS -[partintro] -part intro paragraph + input = <<~'EOS' + [partintro] + part intro paragraph EOS - output = render_string input + output = convert_string input assert_css '.partintro', output, 0 + assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end - test 'should accept partintro on open block without title rendered to DocBook' do - input = <<-EOS -= Book -:doctype: book + test 'should accept partintro on open block without title converted to DocBook' do + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 -[partintro] --- -This is a part intro. + [partintro] + -- + This is a part intro. -It can have multiple paragraphs. --- + It can have multiple paragraphs. + -- -== Chapter 1 + == Chapter 1 -content + content EOS - output = render_string input, :backend => 'docbook45' + output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 1 - assert_css 'part#_part_1 > partintro', output, 1 + assert_css 'part[xml|id="_part_1"] > partintro', output, 1 assert_css 'partintro > simpara', output, 2 end - test 'should accept partintro on open block with title rendered to DocBook' do - input = <<-EOS -= Book -:doctype: book + test 'should accept partintro on open block with title converted to DocBook' do + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 -.Intro title -[partintro] --- -This is a part intro with a title. --- + .Intro title + [partintro] + -- + This is a part intro with a title. 
+ -- -== Chapter 1 + == Chapter 1 -content + content EOS - output = render_string input, :backend => 'docbook45' + output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 1 - assert_css 'part#_part_1 > partintro', output, 1 + assert_css 'part[xml|id="_part_1"] > partintro', output, 1 assert_css 'partintro > title', output, 1 assert_css 'partintro > title + simpara', output, 1 end - test 'should exclude partintro if not a child of part rendered to DocBook' do - input = <<-EOS -= Book -:doctype: book + test 'should exclude partintro if not a child of part converted to DocBook' do + input = <<~'EOS' + = Book + :doctype: book -[partintro] -part intro paragraph + [partintro] + part intro paragraph EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 0 + assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end - test 'should not allow partintro unless doctype is book rendered to DocBook' do - input = <<-EOS -[partintro] -part intro paragraph + test 'should not allow partintro unless doctype is book converted to DocBook' do + input = <<~'EOS' + [partintro] + part intro paragraph EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 0 + assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end end context 'Substitutions' do + test 'processor should not crash if subs are empty' do + input = <<~'EOS' + [subs=","] + .... + content + .... + EOS + + doc = document_from_string input + block = doc.blocks.first + assert_equal [], block.subs + end + test 'should be able to append subs to default block substitution list' do - input = <<-EOS -:application: Asciidoctor + input = <<~'EOS' + :application: Asciidoctor -[subs="+attributes,+macros"] -.... -{application} -.... + [subs="+attributes,+macros"] + .... + {application} + .... EOS doc = document_from_string input @@ -2846,13 +3456,13 @@ end test 'should be able to prepend subs to default block substitution list' do - input = <<-EOS -:application: Asciidoctor + input = <<~'EOS' + :application: Asciidoctor -[subs="attributes+"] -.... -{application} -.... + [subs="attributes+"] + .... + {application} + .... EOS doc = document_from_string input @@ -2861,9 +3471,9 @@ end test 'should be able to remove subs to default block substitution list' do - input = <<-EOS -[subs="-quotes,-replacements"] -content + input = <<~'EOS' + [subs="-quotes,-replacements"] + content EOS doc = document_from_string input @@ -2872,111 +3482,184 @@ end test 'should be able to prepend, append and remove subs from default block substitution list' do - input = <<-EOS -:application: asciidoctor + input = <<~'EOS' + :application: asciidoctor -[subs="attributes+,-verbatim,+specialcharacters,+macros"] -.... -http://{application}.org[{gt}{gt}] <1> -.... + [subs="attributes+,-verbatim,+specialcharacters,+macros"] + .... + https://{application}.org[{gt}{gt}] <1> + .... EOS - doc = document_from_string input, :header_footer => false + doc = document_from_string input, standalone: false block = doc.blocks.first assert_equal [:attributes, :specialcharacters, :macros], block.subs - result = doc.render - assert result.include?('
<pre><a href="http://asciidoctor.org">&gt;&gt;</a> &lt;1&gt;</pre>
    ') + result = doc.convert + assert_includes result, '
<pre><a href="https://asciidoctor.org">&gt;&gt;</a> &lt;1&gt;</pre>
    ' end test 'should be able to set subs then modify them' do - input = <<-EOS -[subs="verbatim,-callouts"] -_hey now_ <1> + input = <<~'EOS' + [subs="verbatim,-callouts"] + _hey now_ <1> EOS - doc = document_from_string input, :header_footer => false + doc = document_from_string input, standalone: false block = doc.blocks.first assert_equal [:specialcharacters], block.subs - result = doc.render - assert result.include?('_hey now_ <1>') + result = doc.convert + assert_includes result, '_hey now_ <1>' end end context 'References' do test 'should not recognize block anchor with illegal id characters' do - input = <<-EOS -[[illegal$id,Reference Text]] ----- -content ----- + input = <<~'EOS' + [[illegal$id,Reference Text]] + ---- + content + ---- EOS doc = document_from_string input block = doc.blocks.first assert_nil block.id assert_nil(block.attr 'reftext') - assert !doc.references[:ids].has_key?('illegal$id') + refute doc.catalog[:refs].key? 'illegal$id' + end + + test 'should not recognize block anchor that starts with digit' do + input = <<~'EOS' + [[3-blind-mice]] + -- + see how they run + -- + EOS + + output = convert_string_to_embedded input + assert_includes output, '[[3-blind-mice]]' + assert_xpath '/*[@id=":3-blind-mice"]', output, 0 + end + + test 'should recognize block anchor that starts with colon' do + input = <<~'EOS' + [[:idname]] + -- + content + -- + EOS + + output = convert_string_to_embedded input + assert_xpath '/*[@id=":idname"]', output, 1 end test 'should use specified id and reftext when registering block reference' do - input = <<-EOS -[[debian,Debian Install]] -.Installation on Debian ----- -$ apt-get install asciidoctor ----- + input = <<~'EOS' + [[debian,Debian Install]] + .Installation on Debian + ---- + $ apt-get install asciidoctor + ---- EOS doc = document_from_string input - reftext = doc.references[:ids]['debian'] - refute_nil reftext - assert_equal 'Debian Install', reftext + ref = doc.catalog[:refs]['debian'] + refute_nil ref + assert_equal 'Debian Install', ref.reftext + assert_equal 'debian', (doc.resolve_id 'Debian Install') end test 'should allow square brackets in block reference text' do - input = <<-EOS -[[debian,[Debian] Install]] -.Installation on Debian ----- -$ apt-get install asciidoctor ----- + input = <<~'EOS' + [[debian,[Debian] Install]] + .Installation on Debian + ---- + $ apt-get install asciidoctor + ---- EOS doc = document_from_string input - reftext = doc.references[:ids]['debian'] - refute_nil reftext - assert_equal '[Debian] Install', reftext + ref = doc.catalog[:refs]['debian'] + refute_nil ref + assert_equal '[Debian] Install', ref.reftext + assert_equal 'debian', (doc.resolve_id '[Debian] Install') end test 'should allow comma in block reference text' do - input = <<-EOS -[[debian, Debian, Ubuntu]] -.Installation on Debian ----- -$ apt-get install asciidoctor ----- + input = <<~'EOS' + [[debian, Debian, Ubuntu]] + .Installation on Debian + ---- + $ apt-get install asciidoctor + ---- + EOS + + doc = document_from_string input + ref = doc.catalog[:refs]['debian'] + refute_nil ref + assert_equal 'Debian, Ubuntu', ref.reftext + assert_equal 'debian', (doc.resolve_id 'Debian, Ubuntu') + end + + test 'should resolve attribute reference in title using attribute defined at location of block' do + input = <<~'EOS' + = Document Title + :foo: baz + + intro paragraph. see <>. 
+ + :foo: bar + + .foo is {foo} + [#formal-para] + paragraph with title + + [discrete#free-standing] + == foo is still {foo} + EOS + + doc = document_from_string input + ref = doc.catalog[:refs]['formal-para'] + refute_nil ref + assert_equal 'foo is bar', ref.title + assert_equal 'formal-para', (doc.resolve_id 'foo is bar') + output = doc.convert standalone: false + assert_include 'foo is still bar', output + assert_include '
<h2 id="free-standing" class="discrete">foo is still bar</h2>
    ', output + end + + test 'should substitute attribute references in reftext when registering block reference' do + input = <<~'EOS' + :label-tiger: Tiger + + [[tiger-evolution,Evolution of the {label-tiger}]] + **** + Information about the evolution of the tiger. + **** EOS doc = document_from_string input - reftext = doc.references[:ids]['debian'] - refute_nil reftext - assert_equal 'Debian, Ubuntu', reftext + ref = doc.catalog[:refs]['tiger-evolution'] + refute_nil ref + assert_equal 'Evolution of the Tiger', ref.attributes['reftext'] + assert_equal 'tiger-evolution', (doc.resolve_id 'Evolution of the Tiger') end test 'should use specified reftext when registering block reference' do - input = <<-EOS -[[debian]] -[reftext="Debian Install"] -.Installation on Debian ----- -$ apt-get install asciidoctor ----- + input = <<~'EOS' + [[debian]] + [reftext="Debian Install"] + .Installation on Debian + ---- + $ apt-get install asciidoctor + ---- EOS doc = document_from_string input - reftext = doc.references[:ids]['debian'] - refute_nil reftext - assert_equal 'Debian Install', reftext + ref = doc.catalog[:refs]['debian'] + refute_nil ref + assert_equal 'Debian Install', ref.reftext + assert_equal 'debian', (doc.resolve_id 'Debian Install') end end end diff -Nru asciidoctor-1.5.5/test/converter_test.rb asciidoctor-2.0.10/test/converter_test.rb --- asciidoctor-1.5.5/test/converter_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/converter_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,77 +1,88 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end -require 'tilt' unless defined? ::Tilt +# frozen_string_literal: true +require_relative 'test_helper' +require 'tilt' unless defined? ::Tilt.new context 'Converter' do - context 'View options' do test 'should set Haml format to html5 for html5 backend' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/haml'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? Tilt::HamlTemplate + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Tilt::HamlTemplate, selected.templates['paragraph'] assert_equal :html5, selected.templates['paragraph'].options[:format] end test 'should set Haml format to xhtml for docbook backend' do - doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? 
Tilt::HamlTemplate + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Tilt::HamlTemplate, selected.templates['paragraph'] assert_equal :xhtml, selected.templates['paragraph'].options[:format] end + test 'should configure Slim to resolve includes in specified template dirs' do + template_dirs = [(fixture_path 'custom-backends/slim'), (fixture_path 'custom-backends/slim-overrides')] + doc = Asciidoctor::Document.new [], template_dirs: template_dirs, template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + selected = doc.converter.find_converter('paragraph') + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates['paragraph'] + assert_equal template_dirs.reverse.map {|dir| File.expand_path dir }, selected.templates['paragraph'].options[:include_dirs] + end + + test 'should coerce template_dirs option to an Array' do + template_dirs = fixture_path 'custom-backends/slim' + doc = Asciidoctor::Document.new [], template_dirs: template_dirs, template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + selected = doc.converter.find_converter('paragraph') + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Array, (selected.instance_variable_get :@template_dirs) + end + test 'should set Slim format to html for html5 backend' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? Slim::Template + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal :html, selected.templates['paragraph'].options[:format] end test 'should set Slim format to nil for docbook backend' do - doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/slim'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? 
Slim::Template + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates['paragraph'] assert_nil selected.templates['paragraph'].options[:format] end test 'should set safe mode of Slim AsciiDoc engine to match document safe mode when Slim >= 3' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false, :safe => :unsafe - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false, safe: :unsafe + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected slim_asciidoc_opts = selected.instance_variable_get(:@engine_options)[:slim][:asciidoc] - if ::Slim::VERSION >= '3.0' - assert_equal({ :safe => Asciidoctor::SafeMode::UNSAFE }, slim_asciidoc_opts) - else - assert_nil slim_asciidoc_opts - end + assert_equal({ safe: Asciidoctor::SafeMode::UNSAFE }, slim_asciidoc_opts) end test 'should support custom template engine options for known engine' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false, :template_engine_options => { :slim => { :pretty => true } } - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false, template_engine_options: { slim: { pretty: true } } + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? Slim::Template + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal true, selected.templates['paragraph'].options[:pretty] end test 'should support custom template engine options' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false, :template_engine_options => { :slim => { :pretty => true } } - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false, template_engine_options: { slim: { pretty: true } } + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? 
Slim::Template + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal false, selected.templates['paragraph'].options[:sort_attrs] assert_equal true, selected.templates['paragraph'].options[:pretty] end @@ -79,63 +90,63 @@ context 'Custom backends' do test 'should load Haml templates for default backend' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) - ['paragraph', 'sidebar'].each do |node_name| + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/haml'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + %w(paragraph sidebar).each do |node_name| selected = doc.converter.find_converter node_name - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates[node_name].is_a? Tilt::HamlTemplate + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Tilt::HamlTemplate, selected.templates[node_name] assert_equal %(block_#{node_name}.html.haml), File.basename(selected.templates[node_name].file) end end test 'should set outfilesuffix according to backend info' do doc = Asciidoctor.load 'content' - doc.render + doc.convert assert_equal '.html', doc.attributes['outfilesuffix'] - doc = Asciidoctor.load 'content', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false - doc.render + doc = Asciidoctor.load 'content', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false + doc.convert assert_equal '.html', doc.attributes['outfilesuffix'] end test 'should not override outfilesuffix attribute if locked' do - doc = Asciidoctor.load 'content', :attributes => {'outfilesuffix' => '.foo'} - doc.render + doc = Asciidoctor.load 'content', attributes: { 'outfilesuffix' => '.foo' } + doc.convert assert_equal '.foo', doc.attributes['outfilesuffix'] - doc = Asciidoctor.load 'content', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false, :attributes => {'outfilesuffix' => '.foo'} - doc.render + doc = Asciidoctor.load 'content', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false, attributes: { 'outfilesuffix' => '.foo' } + doc.convert assert_equal '.foo', doc.attributes['outfilesuffix'] end - test 'should load Haml templates for docbook45 backend' do - doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) - ['paragraph'].each do |node_name| + test 'should load Haml templates for docbook5 backend' do + doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates[node_name].is_a? 
Tilt::HamlTemplate + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Tilt::HamlTemplate, selected.templates[node_name] assert_equal %(block_#{node_name}.xml.haml), File.basename(selected.templates[node_name].file) end end test 'should use Haml templates in place of built-in templates' do - input = <<-EOS -= Document Title -Author Name - -== Section One - -Sample paragraph - -.Related -**** -Sidebar content -**** + input = <<~'EOS' + = Document Title + Author Name + + == Section One + + Sample paragraph + + .Related + **** + Sidebar content + **** EOS - output = render_embedded_string input, :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false + output = convert_string_to_embedded input, template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p', output, 1 assert_xpath '//aside', output, 1 assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p/following-sibling::aside', output, 1 @@ -143,36 +154,50 @@ assert_xpath '//aside/header/following-sibling::p[text()="Sidebar content"]', output, 1 end + test 'should allow custom backend to emulate a known backend' do + doc = Asciidoctor.load 'content', backend: 'html5-tweaks:html', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false + assert doc.basebackend? 'html' + assert_equal 'html5-tweaks', doc.backend + converter = doc.converter + assert_kind_of Asciidoctor::Converter::TemplateConverter, (converter.find_converter 'embedded') + refute_kind_of Asciidoctor::Converter::TemplateConverter, (converter.find_converter 'admonition') + assert_equal '
<p>content</p>
    ', doc.convert + end + + test 'should create template converter even when a converter is not registered for the specified backend' do + input = 'paragraph content' + output = convert_string_to_embedded input, backend: :unknown, template_dir: (fixture_path 'custom-backends/haml/html5-tweaks'), template_cache: false + assert_equal '
<p>paragraph content</p>
    ', output + end + test 'should use built-in global cache to cache templates' do begin - # clear out any cache, just to be sure Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter - - template_dir = File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml') - doc = Asciidoctor::Document.new [], :template_dir => template_dir + template_dir = fixture_path 'custom-backends/haml' + doc = Asciidoctor::Document.new [], template_dir: template_dir doc.converter caches = Asciidoctor::Converter::TemplateConverter.caches - if defined? ::ThreadSafe::Cache - assert caches[:templates].is_a?(::ThreadSafe::Cache) - assert !caches[:templates].empty? + if defined? ::Concurrent::Map + assert_kind_of ::Concurrent::Map, caches[:templates] + refute_empty caches[:templates] paragraph_template_before = caches[:templates].values.find {|t| File.basename(t.file) == 'block_paragraph.html.haml' } - assert !paragraph_template_before.nil? + refute_nil paragraph_template_before # should use cache - doc = Asciidoctor::Document.new [], :template_dir => template_dir + doc = Asciidoctor::Document.new [], template_dir: template_dir template_converter = doc.converter.find_converter('paragraph') paragraph_template_after = template_converter.templates['paragraph'] - assert !paragraph_template_after.nil? + refute_nil paragraph_template_after assert paragraph_template_before.eql?(paragraph_template_after) # should not use cache - doc = Asciidoctor::Document.new [], :template_dir => template_dir, :template_cache => false + doc = Asciidoctor::Document.new [], template_dir: template_dir, template_cache: false template_converter = doc.converter.find_converter('paragraph') paragraph_template_after = template_converter.templates['paragraph'] - assert !paragraph_template_after.nil? - assert !paragraph_template_before.eql?(paragraph_template_after) + refute_nil paragraph_template_after + refute paragraph_template_before.eql?(paragraph_template_after) else - assert caches.empty? + assert_empty caches end ensure # clean up @@ -181,25 +206,22 @@ end test 'should use custom cache to cache templates' do - template_dir = File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml') + template_dir = fixture_path 'custom-backends/haml' Asciidoctor::PathResolver.new.system_path(File.join(template_dir, 'html5', 'block_paragraph.html.haml'), nil) - caches = { :scans => {}, :templates => {} } - doc = Asciidoctor::Document.new [], :template_dir => template_dir, :template_cache => caches + caches = { scans: {}, templates: {} } + doc = Asciidoctor::Document.new [], template_dir: template_dir, template_cache: caches doc.converter - assert !caches[:scans].empty? - assert !caches[:templates].empty? + refute_empty caches[:scans] + refute_empty caches[:templates] paragraph_template = caches[:templates].values.find {|t| File.basename(t.file) == 'block_paragraph.html.haml' } - assert !paragraph_template.nil? - assert paragraph_template.is_a? ::Tilt::HamlTemplate + refute_nil paragraph_template + assert_kind_of ::Tilt::HamlTemplate, paragraph_template end test 'should be able to disable template cache' do begin - # clear out any cache, just to be sure Asciidoctor::Converter::TemplateConverter.clear_caches if defined? 
Asciidoctor::Converter::TemplateConverter - - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), - :template_cache => false + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/haml'), template_cache: false doc.converter caches = Asciidoctor::Converter::TemplateConverter.caches assert caches.empty? || caches[:scans].empty? @@ -211,141 +233,338 @@ end test 'should load ERB templates using ERBTemplate if eruby is not set' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'erb'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) - ['paragraph'].each do |node_name| + input = %([.wrapper]\n--\nfoobar\n--) + doc = Asciidoctor::Document.new input, template_dir: (fixture_path 'custom-backends/erb'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name - assert selected.is_a? Asciidoctor::Converter::TemplateConverter + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected template = selected.templates[node_name] - assert template.is_a? Tilt::ERBTemplate - assert !(template.is_a? Tilt::ErubisTemplate) - assert template.instance_variable_get('@engine').is_a? ::ERB + assert_kind_of Tilt::ERBTemplate, template + refute_kind_of Tilt::ErubisTemplate, template + assert_kind_of ::ERB, template.instance_variable_get('@engine') assert_equal %(block_#{node_name}.html.erb), File.basename(selected.templates[node_name].file) end + # NOTE verify behavior of trim mode + expected_output = <<~'EOS'.chop +
<div class="openblock wrapper">
    + <div class="content">
    + <div class="paragraph">
    + <p>foobar</p>
    + </div>
    + </div>
    + </div>
    + EOS + assert_equal expected_output, doc.convert end test 'should load ERB templates using ErubisTemplate if eruby is set to erubis' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'erb'), :template_cache => false, :eruby => 'erubis' - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) - ['paragraph'].each do |node_name| + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/erb'), template_cache: false, eruby: 'erubis' + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name - assert selected.is_a? Asciidoctor::Converter::TemplateConverter + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected template = selected.templates[node_name] - assert template.is_a? Tilt::ERBTemplate - assert template.is_a? Tilt::ErubisTemplate - assert template.instance_variable_get('@engine').is_a? ::Erubis::FastEruby + assert_kind_of Tilt::ERBTemplate, template + assert_kind_of Tilt::ErubisTemplate, template + assert_kind_of ::Erubis::FastEruby, template.instance_variable_get('@engine') assert_equal %(block_#{node_name}.html.erb), File.basename(selected.templates[node_name].file) end end test 'should load Slim templates for default backend' do - doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) - ['paragraph', 'sidebar'].each do |node_name| + doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + %w(paragraph sidebar).each do |node_name| selected = doc.converter.find_converter node_name - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates[node_name].is_a? Slim::Template + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates[node_name] assert_equal %(block_#{node_name}.html.slim), File.basename(selected.templates[node_name].file) end end - test 'should load Slim templates for docbook45 backend' do - doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false - assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) - ['paragraph'].each do |node_name| + test 'should load Slim templates for docbook5 backend' do + doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/slim'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates[node_name].is_a? 
Slim::Template + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Slim::Template, selected.templates[node_name] assert_equal %(block_#{node_name}.xml.slim), File.basename(selected.templates[node_name].file) end end test 'should use Slim templates in place of built-in templates' do - input = <<-EOS -= Document Title -Author Name - -== Section One - -Sample paragraph - -.Related -**** -Sidebar content -**** + input = <<~'EOS' + = Document Title + Author Name + + == Section One + + Sample paragraph + + .Related + **** + Sidebar content + **** EOS - output = render_embedded_string input, :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false + output = convert_string_to_embedded input, template_dir: (fixture_path 'custom-backends/slim'), template_cache: false assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p', output, 1 assert_xpath '//aside', output, 1 assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p/following-sibling::aside', output, 1 assert_xpath '//aside/header/h1[text()="Related"]', output, 1 assert_xpath '//aside/header/following-sibling::p[text()="Sidebar content"]', output, 1 end + end + + context 'Custom converters' do + test 'should derive backend traits for the given backend' do + expected = { basebackend: 'dita', filetype: 'dita', outfilesuffix: '.dita' } + actual = Asciidoctor::Converter.derive_backend_traits 'dita2' + assert_equal expected, actual + end - test 'should use custom converter if specified' do - input = <<-EOS -= Document Title + test 'should use specified converter for current backend' do + input = <<~'EOS' + = Document Title -preamble + preamble -== Section + == Section -content + content EOS - class CustomConverterA - def initialize backend, opts = {} - end + class CustomHtmlConverterA + def initialize *args; end def convert node, name = nil 'document' end + end + + doc = document_from_string input, converter: CustomHtmlConverterA + assert_kind_of CustomHtmlConverterA, doc.converter + assert_equal 'html', doc.attributes['filetype'] + assert 'document', doc.convert + end + + test 'should use specified converter for specified backend' do + input = <<~'EOS' + = Document Title - def self.converts? 
backend - true + preamble + + == Section + + content + EOS + + class CustomTextConverterA + def initialize *args; end + + def convert node, name = nil + 'document' end end - output = render_string input, :converter => CustomConverterA - assert 'document', output + doc = document_from_string input, backend: 'text', converter: CustomTextConverterA + assert_kind_of CustomTextConverterA, doc.converter + assert_equal 'text', doc.attributes['filetype'] + assert 'document', doc.convert end - test 'should use converter registered for backend' do - input = <<-EOS -content + test 'should get converter from specified converter factory' do + input = <<~'EOS' + = Document Title + + preamble + + == Section + + content EOS + my_converter_class = Class.new Asciidoctor::Converter::Base do + def convert_document node + 'document' + end + end + + converter_factory = Asciidoctor::Converter::CustomFactory.new 'html5' => my_converter_class + + doc = document_from_string input, converter_factory: converter_factory + assert_kind_of my_converter_class, doc.converter + assert_equal 'html', doc.attributes['filetype'] + assert 'document', doc.convert + end + + test 'should allow converter to set htmlsyntax when basebackend is html' do + input = 'image::sunset.jpg[]' + converter = Asciidoctor::Converter.create 'html5', htmlsyntax: 'xml' + doc = document_from_string input, converter: converter + assert_equal converter, doc.converter + assert_equal 'xml', (doc.attr 'htmlsyntax') + output = doc.convert standalone: false + assert_includes output, 'sunset' + end + + test 'should use converter registered for backend' do begin - Asciidoctor::Converter::Factory.unregister_all + converters_before = Asciidoctor::Converter.converters class CustomConverterB include Asciidoctor::Converter register_for 'foobar' + def initialize *args + super + basebackend 'text' + filetype 'text' + outfilesuffix '.fb' + end + def convert node, name = nil 'foobar content' end end - converters = Asciidoctor::Converter::Factory.converters - assert converters.size == 1 + input = 'content' + assert_equal CustomConverterB, (Asciidoctor::Converter.for 'foobar') + converters = Asciidoctor::Converter.converters + assert converters.size == converters_before.size + 1 assert converters['foobar'] == CustomConverterB - output = render_string input, :backend => 'foobar' + output = convert_string input, backend: 'foobar' assert 'foobar content', output ensure - Asciidoctor::Converter::Factory.unregister_all + Asciidoctor::Converter.unregister_all end end - test 'should fall back to catch all converter' do - input = <<-EOS -content - EOS + test 'should be able to register converter using symbol' do + begin + converter = Class.new Asciidoctor::Converter::Base do + register_for :foobaz + def initialize *args + super + basebackend 'text' + filetype 'text' + outfilesuffix '.fb' + end + end + assert_equal converter, (Asciidoctor::Converter.for 'foobaz') + ensure + Asciidoctor::Converter.unregister_all + end + end + test 'should be able to register converter from converter class itself' do begin - Asciidoctor::Converter::Factory.unregister_all + assert_nil Asciidoctor::Converter.for 'foobar' - class CustomConverterC + class AnotherCustomConverterB + include Asciidoctor::Converter + end + + AnotherCustomConverterB.register_for 'foobar' + assert_equal AnotherCustomConverterB, (Asciidoctor::Converter.for 'foobar') + ensure + Asciidoctor::Converter.unregister_all + end + end + + test 'should map handles? method on converter to respond_to? 
implementation by default' do + class CustomConverterC + include Asciidoctor::Converter + def convert_paragraph node + 'paragraph' + end + end + + converter = CustomConverterC.new 'myhtml' + assert_respond_to converter, :handles? + assert converter.handles?(:convert_paragraph) + end + + test 'should not configure converter to support templates by default' do + begin + class CustomConverterD + include Asciidoctor::Converter + register_for 'myhtml' + + def convert node, transform = nil, opts = nil + transform ||= node.node_name + send transform, node + end + + def document node + ['', '', '', node.content, '', ''] * %(\n) + end + + def paragraph node + ['
<div class="paragraph">', %(<p>#{node.content}</p>), '</div>
    '] * %(\n) + end + end + + input = 'paragraph' + doc = document_from_string input, backend: 'myhtml', template_dir: (fixture_path 'custom-backends/slim/html5'), template_cache: false + assert_kind_of CustomConverterD, doc.converter + refute doc.converter.supports_templates? + output = doc.convert + assert_xpath '//*[@class="paragraph"]/p[text()="paragraph"]', output, 1 + ensure + Asciidoctor::Converter.unregister_all + end + end + + test 'should wrap converter in composite converter with template converter if it declares that it supports templates' do + begin + class CustomConverterE < Asciidoctor::Converter::Base + register_for 'myhtml' + + def initialize *args + super + supports_templates + end + + def convert node, transform = nil, opts = nil + transform ||= node.node_name + send transform, node + end + + alias handles? respond_to? + + def document node + ['', '', '', node.content, '', ''] * %(\n) + end + + def paragraph node + ['
<div class="paragraph">', %(<p>#{node.content}</p>), '</div>
    '] * %(\n) + end + end + + input = 'paragraph' + doc = document_from_string input, backend: 'myhtml', template_dir: (fixture_path 'custom-backends/slim/html5'), template_cache: false + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter + output = doc.convert + assert_xpath '//*[@class="paragraph"]/p[text()="paragraph"]', output, 0 + assert_xpath '//body/p[text()="paragraph"]', output, 1 + ensure + Asciidoctor::Converter.unregister_all + end + end + + test 'should map Factory.new to DefaultFactoryProxy constructor by default' do + assert_equal (Asciidoctor::Converter.for 'html5'), (Asciidoctor::Converter::Factory.new.for 'html5') + end + + test 'should map Factory.new to CustomFactory constructor if proxy keyword arg is false' do + assert_nil (Asciidoctor::Converter::Factory.new proxy_default: false).for 'html5' + end + + test 'should default to catch all converter' do + begin + class CustomConverterF include Asciidoctor::Converter register_for '*' def convert node, name = nil @@ -353,13 +572,76 @@ end end - converters = Asciidoctor::Converter::Factory.converters - assert converters['*'] == CustomConverterC - output = render_string input, :backend => 'foobaz' + input = 'content' + assert_equal CustomConverterF, (Asciidoctor::Converter.for 'all') + assert_equal CustomConverterF, (Asciidoctor::Converter.for 'whatever') + refute_equal CustomConverterF, (Asciidoctor::Converter.for 'html5') + converters = Asciidoctor::Converter.converters + assert_nil converters['*'] + assert_equal CustomConverterF, (Asciidoctor::Converter.send :catch_all) + output = convert_string input, backend: 'foobaz' assert 'foobaz content', output ensure - Asciidoctor::Converter::Factory.unregister_all + Asciidoctor::Converter.unregister_all + end + end + + test 'should use catch all converter from custom factory only if no other converter matches' do + class FooConverter < Asciidoctor::Converter::Base; end + class CatchAllConverter < Asciidoctor::Converter::Base; end + + factory = Asciidoctor::Converter::CustomFactory.new 'foo' => FooConverter, '*' => CatchAllConverter + assert_equal FooConverter, (factory.for 'foo') + assert_equal CatchAllConverter, (factory.for 'nada') + assert_equal CatchAllConverter, (factory.for 'html5') + end + + test 'should prefer catch all converter from proxy over statically registered catch all converter' do + begin + class StaticCatchAllConverter < Asciidoctor::Converter::Base + register_for '*' + end + + class LocalCatchAllConverter < Asciidoctor::Converter::Base; end + + factory = Asciidoctor::Converter::DefaultFactoryProxy.new '*' => LocalCatchAllConverter + assert_equal LocalCatchAllConverter, (factory.for 'foobar') + refute_equal LocalCatchAllConverter, (factory.for 'html5') + refute_equal StaticCatchAllConverter, (factory.for 'html5') + ensure + Asciidoctor::Converter.unregister_all end end + + test 'should prefer converter in proxy with same name as provided converter' do + class MyHtml5Converter < Asciidoctor::Converter::Base; end + factory = Asciidoctor::Converter::DefaultFactoryProxy.new 'html5' => MyHtml5Converter + assert_equal MyHtml5Converter, (factory.for 'html5') + end + + test 'should allow nil to be registered as converter' do + factory = Asciidoctor::Converter::DefaultFactoryProxy.new 'html5' => nil + assert_nil factory.for 'html5' + end + + test 'should create a new custom factory when Converter::Factory.new is invoked' do + class MyConverter < Asciidoctor::Converter::Base; end + factory = Asciidoctor::Converter::Factory.new 'mine' => MyConverter 
+ assert_kind_of Asciidoctor::Converter::CustomFactory, factory + assert_equal MyConverter, (factory.for 'mine') + end + + test 'should delegate to method on HTML 5 converter with convert_ prefix if called without prefix' do + doc = document_from_string 'paragraph' + result = doc.converter.paragraph doc.blocks[0] + assert_css 'p', result, 1 + end + + test 'can call read_svg_contents on built-in HTML5 converter; should remove markup prior the root svg element' do + doc = document_from_string 'image::circle.svg[]', base_dir: fixturedir + result = doc.converter.read_svg_contents doc.blocks[0], 'circle.svg' + refute_nil result + assert result.start_with? ' 'server' + doc = empty_document safe: 'server' assert_equal Asciidoctor::SafeMode::SERVER, doc.safe - doc = empty_document :safe => 'foo' + doc = empty_document safe: 'foo' assert_equal Asciidoctor::SafeMode::SECURE, doc.safe end test 'safe mode level set using symbol' do - doc = empty_document :safe => :server + doc = empty_document safe: :server assert_equal Asciidoctor::SafeMode::SERVER, doc.safe - doc = empty_document :safe => :foo + doc = empty_document safe: :foo assert_equal Asciidoctor::SafeMode::SECURE, doc.safe end test 'safe mode level set using integer' do - doc = empty_document :safe => 10 + doc = empty_document safe: 10 assert_equal Asciidoctor::SafeMode::SERVER, doc.safe - doc = empty_document :safe => 100 + doc = empty_document safe: 100 assert_equal 100, doc.safe end @@ -58,13 +58,13 @@ assert_equal Asciidoctor::SafeMode::SECURE, doc.attr('safe-mode-level') assert_equal 'secure', doc.attr('safe-mode-name') assert doc.attr?('safe-mode-secure') - assert !doc.attr?('safe-mode-unsafe') - assert !doc.attr?('safe-mode-safe') - assert !doc.attr?('safe-mode-server') + refute doc.attr?('safe-mode-unsafe') + refute doc.attr?('safe-mode-safe') + refute doc.attr?('safe-mode-server') end test 'safe mode level can be set in the constructor' do - doc = Asciidoctor::Document.new [], :safe => Asciidoctor::SafeMode::SAFE + doc = Asciidoctor::Document.new [], safe: Asciidoctor::SafeMode::SAFE assert_equal Asciidoctor::SafeMode::SAFE, doc.safe end @@ -77,8 +77,8 @@ end end - test 'toc and sectnums should be enabled by default for DocBook backend' do - doc = document_from_string 'content', :backend => 'docbook', :parse => true + test 'toc and sectnums should be enabled by default in DocBook backend' do + doc = document_from_string 'content', backend: 'docbook', parse: true assert doc.attr?('toc') assert doc.attr?('sectnums') result = doc.convert @@ -87,7 +87,7 @@ end test 'maxdepth attribute should be set on asciidoc-toc and asciidoc-numbered processing instructions in DocBook backend' do - doc = document_from_string 'content', :backend => 'docbook', :parse => true, :attributes => {'toclevels' => '1', 'sectnumlevels' => '1' } + doc = document_from_string 'content', backend: 'docbook', parse: true, attributes: { 'toclevels' => '1', 'sectnumlevels' => '1' } assert doc.attr?('toc') assert doc.attr?('sectnums') result = doc.convert @@ -95,796 +95,149 @@ assert_match('', result) end - test 'should be able to disable toc and sectnums in document header for DocBook backend' do - input = <<-EOS -= Document Title -:toc!: -:sectnums!: - EOS - doc = document_from_string input, :backend => 'docbook' - assert !doc.attr?('toc') - assert !doc.attr?('sectnums') - end - - test 'should be able to disable section numbering using numbered attribute in document header for DocBook backend' do - input = <<-EOS -= Document Title -:numbered!: - EOS - doc = 
document_from_string input, :backend => 'docbook' - assert !doc.attr?('sectnums') - end - end - - context 'Load APIs' do - test 'should load input file' do - sample_input_path = fixture_path('sample.asciidoc') - doc = Asciidoctor.load(File.new(sample_input_path), :safe => Asciidoctor::SafeMode::SAFE) - assert_equal 'Document Title', doc.doctitle - assert_equal File.expand_path(sample_input_path), doc.attr('docfile') - assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') - end - - test 'should load input file from filename' do - sample_input_path = fixture_path('sample.asciidoc') - doc = Asciidoctor.load_file(sample_input_path, :safe => Asciidoctor::SafeMode::SAFE) - assert_equal 'Document Title', doc.doctitle - assert_equal File.expand_path(sample_input_path), doc.attr('docfile') - assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') - end - - test 'should not load invalid file' do - sample_input_path = fixture_path('hello-asciidoctor.pdf') - exception = assert_raises ArgumentError do - Asciidoctor.load_file(sample_input_path, :safe => Asciidoctor::SafeMode::SAFE) - end - assert_match(/Failed to load AsciiDoc document/, exception.message) - # verify we have the correct backtrace (should be in at least first 5 lines) - assert_match((RUBY_ENGINE == 'rbx' ? /parser\.rb/ : /helpers\.rb/), exception.backtrace[0..4].join("\n")) - end if RUBY_MIN_VERSION_1_9 - - test 'should load input IO' do - input = StringIO.new(<<-EOS) -Document Title -============== - -preamble - EOS - doc = Asciidoctor.load(input, :safe => Asciidoctor::SafeMode::SAFE) - assert_equal 'Document Title', doc.doctitle - assert !doc.attr?('docfile') - assert_equal doc.base_dir, doc.attr('docdir') - end - - test 'should load input string' do - input = <<-EOS -Document Title -============== - -preamble - EOS - doc = Asciidoctor.load(input, :safe => Asciidoctor::SafeMode::SAFE) - assert_equal 'Document Title', doc.doctitle - assert !doc.attr?('docfile') - assert_equal doc.base_dir, doc.attr('docdir') - end - - test 'should load input string array' do - input = <<-EOS -Document Title -============== - -preamble - EOS - doc = Asciidoctor.load(input.lines.entries, :safe => Asciidoctor::SafeMode::SAFE) - assert_equal 'Document Title', doc.doctitle - assert !doc.attr?('docfile') - assert_equal doc.base_dir, doc.attr('docdir') - end - - test 'should accept attributes as array' do - # NOTE there's a tab character before idseparator - doc = Asciidoctor.load('text', :attributes => %w(toc sectnums source-highlighter=coderay idprefix idseparator=-)) - assert doc.attributes.is_a?(Hash) - assert doc.attr?('toc') - assert_equal '', doc.attr('toc') - assert doc.attr?('sectnums') - assert_equal '', doc.attr('sectnums') - assert doc.attr?('source-highlighter') - assert_equal 'coderay', doc.attr('source-highlighter') - assert doc.attr?('idprefix') - assert_equal '', doc.attr('idprefix') - assert doc.attr?('idseparator') - assert_equal '-', doc.attr('idseparator') - end - - test 'should accept attributes as empty array' do - doc = Asciidoctor.load('text', :attributes => []) - assert doc.attributes.is_a?(Hash) - end - - test 'should accept attributes as string' do - # NOTE there's a tab character before idseparator - doc = Asciidoctor.load('text', :attributes => 'toc sectnums source-highlighter=coderay idprefix idseparator=-') - assert doc.attributes.is_a?(Hash) - assert doc.attr?('toc') - assert_equal '', doc.attr('toc') - assert doc.attr?('sectnums') - assert_equal '', doc.attr('sectnums') - 
assert doc.attr?('source-highlighter') - assert_equal 'coderay', doc.attr('source-highlighter') - assert doc.attr?('idprefix') - assert_equal '', doc.attr('idprefix') - assert doc.attr?('idseparator') - assert_equal '-', doc.attr('idseparator') - end - - test 'should accept values containing spaces in attributes string' do - # NOTE there's a tab character before self: - doc = Asciidoctor.load('text', :attributes => 'idprefix idseparator=- note-caption=Note\ to\ self: toc') - assert doc.attributes.is_a?(Hash) - assert doc.attr?('idprefix') - assert_equal '', doc.attr('idprefix') - assert doc.attr?('idseparator') - assert_equal '-', doc.attr('idseparator') - assert doc.attr?('note-caption') - assert_equal "Note to self:", doc.attr('note-caption') - end - - test 'should accept attributes as empty string' do - doc = Asciidoctor.load('text', :attributes => '') - assert doc.attributes.is_a?(Hash) - end - - test 'should accept attributes as nil' do - doc = Asciidoctor.load('text', :attributes => nil) - assert doc.attributes.is_a?(Hash) - end - - test 'should accept attributes if hash like' do - class Hashish - def initialize - @table = {'toc' => ''} - end - - def keys - @table.keys - end - - def [](key) - @table[key] - end - end - - doc = Asciidoctor.load('text', :attributes => Hashish.new) - assert doc.attributes.is_a?(Hash) - assert doc.attributes.has_key?('toc') - end - - test 'should output timestamps by default' do - doc = document_from_string 'text', :backend => :html5, :attributes => nil - result = doc.convert - assert doc.attr?('docdate') - refute doc.attr? 'reproducible' - assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 1 - end - - test 'should not output timestamps if reproducible attribute is set in HTML 5' do - doc = document_from_string 'text', :backend => :html5, :attributes => { 'reproducible' => '' } - result = doc.convert - assert doc.attr?('docdate') - assert doc.attr?('reproducible') - assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 0 - end - - test 'should not output timestamps if reproducible attribute is set in DocBook' do - doc = document_from_string 'text', :backend => :docbook, :attributes => { 'reproducible' => '' } - result = doc.convert - assert doc.attr?('docdate') - assert doc.attr?('reproducible') - assert_xpath '/article/info/date', result, 0 - end - - test 'should not modify options argument' do - options = { - :safe => Asciidoctor::SafeMode::SAFE - } - options.freeze - sample_input_path = fixture_path('sample.asciidoc') - begin - Asciidoctor.load_file sample_input_path, options - rescue - flunk %(options argument should not be modified) - end - end - - test 'should not modify attributes Hash argument' do - attributes = {} - attributes.freeze - options = { - :safe => Asciidoctor::SafeMode::SAFE, - :attributes => attributes - } - sample_input_path = fixture_path('sample.asciidoc') - begin - Asciidoctor.load_file sample_input_path, options - rescue - flunk %(attributes argument should not be modified) - end - end - - test 'should track file and line information with blocks if sourcemap option is set' do - doc = Asciidoctor.load_file fixture_path('sample.asciidoc'), :sourcemap => true - - section_1 = doc.sections[0] - assert_equal 'Section A', section_1.title - refute_nil section_1.source_location - assert_equal 'sample.asciidoc', section_1.file - assert_equal 10, section_1.lineno - - section_2 = doc.sections[1] - assert_equal 'Section B', section_2.title - refute_nil 
section_2.source_location - assert_equal 'sample.asciidoc', section_2.file - assert_equal 18, section_2.lineno - - last_block = section_2.blocks[-1] - assert_equal :ulist, last_block.context - refute_nil last_block.source_location - assert_equal 'sample.asciidoc', last_block.file - assert_equal 23, last_block.lineno - - doc = Asciidoctor.load_file fixture_path('master.adoc'), :sourcemap => true, :safe => :safe - - section_1 = doc.sections[0] - assert_equal 'Chapter A', section_1.title - refute_nil section_1.source_location - assert_equal fixture_path('chapter-a.adoc'), section_1.file - assert_equal 1, section_1.lineno - end - - test 'find_by should return Array of blocks anywhere in document tree that match criteria' do - input = <<-EOS -= Document Title - -preamble - -== Section A - -paragraph - --- -Exhibit A:: -+ -[#tiger.animal] -image::tiger.png[Tiger] --- - -image::shoe.png[Shoe] - -== Section B - -paragraph - EOS - - doc = Asciidoctor.load input - result = doc.find_by :context => :image - assert_equal 2, result.size - assert_equal :image, result[0].context - assert_equal 'tiger.png', result[0].attr('target') - assert_equal :image, result[1].context - assert_equal 'shoe.png', result[1].attr('target') - end - - test 'find_by should return an empty Array if no matches are found' do - input = <<-EOS -paragraph - EOS - doc = Asciidoctor.load input - result = doc.find_by :context => :section - refute_nil result - assert_equal 0, result.size - end - - test 'find_by should return Array of blocks that match style criteria' do - input = <<-EOS -[square] -* one -* two -* three - ---- - -* apples -* bananas -* pears - EOS - - doc = Asciidoctor.load input - result = doc.find_by :context => :ulist, :style => 'square' - assert_equal 1, result.size - assert_equal :ulist, result[0].context - end - - test 'find_by should return Array of blocks that match role criteria' do - input = <<-EOS -[#tiger.animal] -image::tiger.png[Tiger] - -image::shoe.png[Shoe] + test 'should be able to disable toc and sectnums in document header in DocBook backend' do + input = <<~'EOS' + = Document Title + :toc!: + :sectnums!: EOS - - doc = Asciidoctor.load input - result = doc.find_by :context => :image, :role => 'animal' - assert_equal 1, result.size - assert_equal :image, result[0].context - assert_equal 'tiger.png', result[0].attr('target') + doc = document_from_string input, backend: 'docbook' + refute doc.attr?('toc') + refute doc.attr?('sectnums') end - test 'find_by should return the document title section if context selector is :section' do - input = <<-EOS -= Document Title - -preamble + test 'noheader attribute should suppress info element when converting to DocBook' do + input = <<~'EOS' + = Document Title + :noheader: -== Section One - -content + content EOS - doc = Asciidoctor.load input - result = doc.find_by :context => :section - refute_nil result - assert_equal 2, result.size - assert_equal :section, result[0].context - assert_equal 'Document Title', result[0].title - end - - test 'find_by should only return results for which the block argument yields true' do - input = <<-EOS -== Section - -content - -=== Subsection - -content - EOS - doc = Asciidoctor.load input - result = doc.find_by(:context => :section) {|sect| sect.level == 1 } - refute_nil result - assert_equal 1, result.size - assert_equal :section, result[0].context - assert_equal 'Section', result[0].title - end - - test 'find_by should only return one result when matching by id' do - input = <<-EOS -== Section - -content - -[#subsection] -=== 
Subsection - -content - EOS - doc = Asciidoctor.load input - result = doc.find_by(:context => :section, :id => 'subsection') - refute_nil result - assert_equal 1, result.size - assert_equal :section, result[0].context - assert_equal 'Subsection', result[0].title - end - - test 'find_by should return an empty Array if the id criteria matches but the block argument yields false' do - input = <<-EOS -== Section - -content - -[#subsection] -=== Subsection - -content - EOS - doc = Asciidoctor.load input - result = doc.find_by(:context => :section, :id => 'subsection') {|sect| false } - refute_nil result - assert_equal 0, result.size - end - - test 'find_by should not crash if dlist entry does not have description' do - input = <<-EOS -term without description:: - EOS - doc = Asciidoctor.load input - result = doc.find_by - refute_nil result - assert_equal 3, result.size - assert Asciidoctor::Document === result[0] - assert Asciidoctor::List === result[1] - assert Asciidoctor::ListItem === result[2] - end - end - - context 'Convert APIs' do - test 'should convert source document to string when to_file is false' do - sample_input_path = fixture_path('sample.asciidoc') - - output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false - assert !output.empty? - assert_xpath '/html', output, 1 - assert_xpath '/html/head', output, 1 - assert_xpath '/html/body', output, 1 - assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 - assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 - end - - test 'lines in output should be separated by line feed' do - sample_input_path = fixture_path('sample.asciidoc') - - output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false - assert !output.empty? 
- lines = output.split("\n") - assert lines.size == output.split(/\r\n|\r|\n/).size - raw_lengths = lines.map(&:length) - trimmed_lengths = lines.map {|line| line.rstrip.length } - assert raw_lengths == trimmed_lengths - end - - test 'should accept attributes as array' do - sample_input_path = fixture_path('sample.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :attributes => %w(sectnums idprefix idseparator=-), :to_file => false - assert_css '#section-a', output, 1 - end - - test 'should accept attributes as string' do - sample_input_path = fixture_path('sample.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :attributes => 'sectnums idprefix idseparator=-', :to_file => false - assert_css '#section-a', output, 1 - end - - test 'should link to default stylesheet by default when safe mode is SECURE or greater' do - sample_input_path = fixture_path('basic.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false - assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 - assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1 - end - - test 'should embed default stylesheet by default if SafeMode is less than SECURE' do - input = <<-EOS -= Document Title - -text - EOS - - output = Asciidoctor.render(input, :safe => Asciidoctor::SafeMode::SERVER, :header_footer => true) - assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 - assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 0 - stylenode = xmlnodes_at_css 'html:root > head > style', output, 1 - styles = stylenode.first.content - assert !styles.nil? - assert !styles.strip.empty? - end - - test 'should link to default stylesheet by default even if linkcss is unset in document' do - input = <<-EOS -= Document Title -:linkcss!: - -text - EOS - - output = Asciidoctor.render(input, :header_footer => true) - assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 - assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1 - end - - test 'should link to default stylesheet by default if linkcss is unset' do - input = <<-EOS -= Document Title - -text - EOS - - output = Asciidoctor.render(input, :header_footer => true, :attributes => {'linkcss!' => ''}) - assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 - assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1 - end - - test 'should embed default stylesheet if safe mode is less than secure and linkcss is unset' do - sample_input_path = fixture_path('basic.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false, - :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'linkcss!' => ''} - assert_css 'html:root > head > style', output, 1 - stylenode = xmlnodes_at_css 'html:root > head > style', output, 1 - styles = stylenode.first.content - assert !styles.nil? - assert !styles.strip.empty? - end - - test 'should not link to stylesheet if stylesheet is unset' do - input = <<-EOS -= Document Title - -text - EOS - - output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet!' 
=> ''}) - assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0 - assert_css 'html:root > head > link[rel="stylesheet"]', output, 0 + result = convert_string input, backend: 'docbook' + assert_xpath '/article', result, 1 + assert_xpath '/article/info', result, 0 end - test 'should link to custom stylesheet if specified in stylesheet attribute' do - input = <<-EOS -= Document Title - -text + test 'should be able to disable section numbering using numbered attribute in document header in DocBook backend' do + input = <<~'EOS' + = Document Title + :numbered!: EOS - - output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet' => './custom.css'}) - assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0 - assert_css 'html:root > head > link[rel="stylesheet"][href="./custom.css"]', output, 1 - - output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet' => 'file:///home/username/custom.css'}) - assert_css 'html:root > head > link[rel="stylesheet"][href="file:///home/username/custom.css"]', output, 1 - end - - test 'should resolve custom stylesheet relative to stylesdir' do - input = <<-EOS -= Document Title - -text - EOS - - output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets'}) - assert_css 'html:root > head > link[rel="stylesheet"][href="./stylesheets/custom.css"]', output, 1 - end - - test 'should resolve custom stylesheet to embed relative to stylesdir' do - sample_input_path = fixture_path('basic.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :safe => Asciidoctor::SafeMode::SAFE, :to_file => false, - :attributes => {'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets', 'linkcss!' => ''} - stylenode = xmlnodes_at_css 'html:root > head > style', output, 1 - styles = stylenode.first.content - assert !styles.nil? - assert !styles.strip.empty? - end - - test 'should convert source file and write result to adjacent file by default' do - sample_input_path = fixture_path('sample.asciidoc') - sample_output_path = fixture_path('sample.html') - begin - Asciidoctor.convert_file sample_input_path - assert File.exist?(sample_output_path) - output = File.read(sample_output_path) - assert !output.empty? - assert_xpath '/html', output, 1 - assert_xpath '/html/head', output, 1 - assert_xpath '/html/body', output, 1 - assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 - assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 - ensure - FileUtils.rm(sample_output_path) - end - end - - test 'should convert source file and write to specified file' do - sample_input_path = fixture_path('sample.asciidoc') - sample_output_path = fixture_path('result.html') - begin - Asciidoctor.convert_file sample_input_path, :to_file => sample_output_path - assert File.exist?(sample_output_path) - output = File.read(sample_output_path) - assert !output.empty? 
- assert_xpath '/html', output, 1 - assert_xpath '/html/head', output, 1 - assert_xpath '/html/body', output, 1 - assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 - assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 - ensure - FileUtils.rm(sample_output_path) - end - end - - test 'should convert source file and write to specified file in base_dir' do - sample_input_path = fixture_path('sample.asciidoc') - sample_output_path = fixture_path('result.html') - fixture_dir = fixture_path('') - begin - Asciidoctor.convert_file sample_input_path, :to_file => 'result.html', :base_dir => fixture_dir - assert File.exist?(sample_output_path) - output = File.read(sample_output_path) - assert !output.empty? - assert_xpath '/html', output, 1 - assert_xpath '/html/head', output, 1 - assert_xpath '/html/body', output, 1 - assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 - assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 - rescue => e - flunk e.message - ensure - FileUtils.rm(sample_output_path, :force => true) - end - end - - test 'in_place option is ignored when to_file is specified' do - sample_input_path = fixture_path('sample.asciidoc') - sample_output_path = fixture_path('result.html') - begin - Asciidoctor.convert_file sample_input_path, :to_file => sample_output_path, :in_place => true - assert File.exist?(sample_output_path) - ensure - FileUtils.rm(sample_output_path) if File.exist? sample_output_path - end - end - - test 'in_place option is ignored when to_dir is specified' do - sample_input_path = fixture_path('sample.asciidoc') - sample_output_path = fixture_path('sample.html') - begin - Asciidoctor.convert_file sample_input_path, :to_dir => File.dirname(sample_output_path), :in_place => true - assert File.exist?(sample_output_path) - ensure - FileUtils.rm(sample_output_path) if File.exist? sample_output_path - end - end - - test 'output should be relative to to_dir option' do - sample_input_path = fixture_path('sample.asciidoc') - output_dir = File.join(File.dirname(sample_input_path), 'test_output') - Dir.mkdir output_dir if !File.exist? output_dir - sample_output_path = File.join(output_dir, 'sample.html') - begin - Asciidoctor.convert_file sample_input_path, :to_dir => output_dir - assert File.exist? sample_output_path - ensure - FileUtils.rm(sample_output_path) if File.exist? sample_output_path - FileUtils.rmdir output_dir - end - end - - test 'missing directories should be created if mkdirs is enabled' do - sample_input_path = fixture_path('sample.asciidoc') - output_dir = File.join(File.join(File.dirname(sample_input_path), 'test_output'), 'subdir') - sample_output_path = File.join(output_dir, 'sample.html') - begin - Asciidoctor.convert_file sample_input_path, :to_dir => output_dir, :mkdirs => true - assert File.exist? sample_output_path - ensure - FileUtils.rm(sample_output_path) if File.exist? 
sample_output_path - FileUtils.rmdir output_dir - FileUtils.rmdir File.dirname(output_dir) - end - end - - # TODO need similar test for when to_dir is specified - test 'should raise exception if an attempt is made to overwrite input file' do - sample_input_path = fixture_path('sample.asciidoc') - - assert_raises IOError do - Asciidoctor.convert_file sample_input_path, :attributes => { 'outfilesuffix' => '.asciidoc' } - end - end - - test 'to_file should be relative to to_dir when both given' do - sample_input_path = fixture_path('sample.asciidoc') - base_dir = File.dirname(sample_input_path) - sample_rel_output_path = File.join('test_output', 'result.html') - output_dir = File.dirname(File.join(base_dir, sample_rel_output_path)) - Dir.mkdir output_dir if !File.exist? output_dir - sample_output_path = File.join(base_dir, sample_rel_output_path) - begin - Asciidoctor.convert_file sample_input_path, :to_dir => base_dir, :to_file => sample_rel_output_path - assert File.exist? sample_output_path - ensure - FileUtils.rm(sample_output_path) if File.exist? sample_output_path - FileUtils.rmdir output_dir - end - end - - test 'should not modify options argument' do - options = { - :safe => Asciidoctor::SafeMode::SAFE, - :to_file => false - } - options.freeze - sample_input_path = fixture_path('sample.asciidoc') - begin - Asciidoctor.convert_file sample_input_path, options - rescue - flunk %(options argument should not be modified) - end + doc = document_from_string input, backend: 'docbook' + refute doc.attr?('sectnums') end end context 'Docinfo files' do test 'should include docinfo files for html backend' do - sample_input_path = fixture_path('basic.asciidoc') + sample_input_path = fixture_path('basic.adoc') cases = { - 'docinfo' => { :head_script => 1, :meta => 0, :top_link => 0, :footer_script => 1 }, - 'docinfo=private' => { :head_script => 1, :meta => 0, :top_link => 0, :footer_script => 1 }, - 'docinfo1' => { :head_script => 0, :meta => 1, :top_link => 1, :footer_script => 0 }, - 'docinfo=shared' => { :head_script => 0, :meta => 1, :top_link => 1, :footer_script => 0 }, - 'docinfo2' => { :head_script => 1, :meta => 1, :top_link => 1, :footer_script => 1 }, - 'docinfo docinfo2' => { :head_script => 1, :meta => 1, :top_link => 1, :footer_script => 1 }, - 'docinfo=private,shared' => { :head_script => 1, :meta => 1, :top_link => 1, :footer_script => 1 }, - 'docinfo=private-head' => { :head_script => 1, :meta => 0, :top_link => 0, :footer_script => 0 }, - 'docinfo=shared-head' => { :head_script => 0, :meta => 1, :top_link => 0, :footer_script => 0 }, - 'docinfo=private-footer' => { :head_script => 0, :meta => 0, :top_link => 0, :footer_script => 1 }, - 'docinfo=shared-footer' => { :head_script => 0, :meta => 0, :top_link => 1, :footer_script => 0 }, - 'docinfo=private-head\ ,\ shared-footer' => { :head_script => 1, :meta => 0, :top_link => 1, :footer_script => 0 } + 'docinfo' => { head_script: 1, meta: 0, top_link: 0, footer_script: 1, navbar: 1 }, + 'docinfo=private' => { head_script: 1, meta: 0, top_link: 0, footer_script: 1, navbar: 1 }, + 'docinfo1' => { head_script: 0, meta: 1, top_link: 1, footer_script: 0, navbar: 0 }, + 'docinfo=shared' => { head_script: 0, meta: 1, top_link: 1, footer_script: 0, navbar: 0 }, + 'docinfo2' => { head_script: 1, meta: 1, top_link: 1, footer_script: 1, navbar: 1 }, + 'docinfo docinfo2' => { head_script: 1, meta: 1, top_link: 1, footer_script: 1, navbar: 1 }, + 'docinfo=private,shared' => { head_script: 1, meta: 1, top_link: 1, footer_script: 1, navbar: 1 }, + 
'docinfo=private-head' => { head_script: 1, meta: 0, top_link: 0, footer_script: 0, navbar: 0 }, + 'docinfo=private-header' => { head_script: 0, meta: 0, top_link: 0, footer_script: 0, navbar: 1 }, + 'docinfo=shared-head' => { head_script: 0, meta: 1, top_link: 0, footer_script: 0, navbar: 0 }, + 'docinfo=private-footer' => { head_script: 0, meta: 0, top_link: 0, footer_script: 1, navbar: 0 }, + 'docinfo=shared-footer' => { head_script: 0, meta: 0, top_link: 1, footer_script: 0, navbar: 0 }, + 'docinfo=private-head\ ,\ shared-footer' => { head_script: 1, meta: 0, top_link: 1, footer_script: 0, navbar: 0 }, } cases.each do |attr_val, markup| - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => %(linkcss copycss! #{attr_val}) - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: %(linkcss copycss! #{attr_val}) + refute_empty output assert_css 'script[src="modernizr.js"]', output, markup[:head_script] assert_css 'meta[http-equiv="imagetoolbar"]', output, markup[:meta] assert_css 'body > a#top', output, markup[:top_link] assert_css 'body > script', output, markup[:footer_script] + assert_css 'body > nav.navbar', output, markup[:navbar] + assert_css 'body > nav.navbar + #header', output, markup[:navbar] end end + test 'should include docinfo header even if noheader attribute is set' do + sample_input_path = fixture_path('basic.adoc') + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => 'private-header', 'noheader' => '' } + refute_empty output + assert_css 'body > nav.navbar', output, 1 + assert_css 'body > nav.navbar + #content', output, 1 + end + test 'should include docinfo footer even if nofooter attribute is set' do - sample_input_path = fixture_path('basic.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => '', 'nofooter' => ''} - assert !output.empty? + sample_input_path = fixture_path('basic.adoc') + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '', 'nofooter' => '' } + refute_empty output assert_css 'body > a#top', output, 1 end + test 'should include user docinfo after built-in docinfo' do + sample_input_path = fixture_path 'basic.adoc' + attrs = { 'docinfo' => 'shared', 'source-highlighter' => 'highlight.js', 'linkcss' => '', 'copycss' => nil } + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: :safe, attributes: attrs + assert_css 'link[rel=stylesheet] + meta[http-equiv=imagetoolbar]', output, 1 + assert_css 'meta[http-equiv=imagetoolbar] + *', output, 0 + assert_css 'script + a#top', output, 1 + assert_css 'a#top + *', output, 0 + end + test 'should include docinfo files for html backend with custom docinfodir' do - sample_input_path = fixture_path('basic.asciidoc') + sample_input_path = fixture_path('basic.adoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => '', 'docinfodir' => 'custom-docinfodir'} - assert !output.empty? 
+ output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '', 'docinfodir' => 'custom-docinfodir' } + refute_empty output assert_css 'script[src="bootstrap.js"]', output, 1 assert_css 'meta[name="robots"]', output, 0 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => '', 'docinfodir' => 'custom-docinfodir'} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '', 'docinfodir' => 'custom-docinfodir' } + refute_empty output assert_css 'script[src="bootstrap.js"]', output, 0 assert_css 'meta[name="robots"]', output, 1 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => '', 'docinfodir' => './custom-docinfodir'} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '', 'docinfodir' => './custom-docinfodir' } + refute_empty output assert_css 'script[src="bootstrap.js"]', output, 1 assert_css 'meta[name="robots"]', output, 1 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => '', 'docinfodir' => 'custom-docinfodir/subfolder'} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '', 'docinfodir' => 'custom-docinfodir/subfolder' } + refute_empty output assert_css 'script[src="bootstrap.js"]', output, 0 assert_css 'meta[name="robots"]', output, 0 end - test 'should include docinfo files for docbook backend' do - sample_input_path = fixture_path('basic.asciidoc') + test 'should include docinfo files in docbook backend' do + sample_input_path = fixture_path('basic.adoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } + refute_empty output assert_css 'productname', output, 0 assert_css 'copyright', output, 1 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => ''} - assert !output.empty? 
+ output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '' } + refute_empty output assert_css 'productname', output, 1 assert_xpath '//xmlns:productname[text()="Asciidoctor™"]', output, 1 assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 0 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '' } + refute_empty output assert_css 'productname', output, 1 assert_xpath '//xmlns:productname[text()="Asciidoctor™"]', output, 1 assert_css 'edition', output, 1 @@ -892,129 +245,149 @@ assert_css 'copyright', output, 1 end + test 'should use header docinfo in place of default header' do + output = Asciidoctor.convert_file fixture_path('sample.adoc'), to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => 'private-header', 'noheader' => '' } + refute_empty output + assert_css 'article > info', output, 1 + assert_css 'article > info > title', output, 1 + assert_css 'article > info > revhistory', output, 1 + assert_css 'article > info > revhistory > revision', output, 2 + end + test 'should include docinfo footer files for html backend' do - sample_input_path = fixture_path('basic.asciidoc') + sample_input_path = fixture_path('basic.adoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } + refute_empty output assert_css 'body script', output, 1 assert_css 'a#top', output, 0 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '' } + refute_empty output assert_css 'body script', output, 0 assert_css 'a#top', output, 1 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} - assert !output.empty? 
+ output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '' } + refute_empty output assert_css 'body script', output, 1 assert_css 'a#top', output, 1 end - test 'should include docinfo footer files for docbook backend' do - sample_input_path = fixture_path('basic.asciidoc') + test 'should include docinfo footer files in DocBook backend' do + sample_input_path = fixture_path('basic.adoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } + refute_empty output assert_css 'article > revhistory', output, 1 assert_xpath '/xmlns:article/xmlns:revhistory/xmlns:revision/xmlns:revnumber[text()="1.0"]', output, 1 # verifies substitutions are performed - assert_css 'glossary#_glossary', output, 0 + assert_css 'glossary', output, 0 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '' } + refute_empty output assert_css 'article > revhistory', output, 0 - assert_css 'glossary#_glossary', output, 1 + assert_css 'glossary[xml|id="_glossary"]', output, 1 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '' } + refute_empty output assert_css 'article > revhistory', output, 1 assert_xpath '/xmlns:article/xmlns:revhistory/xmlns:revision/xmlns:revnumber[text()="1.0"]', output, 1 # verifies substitutions are performed - assert_css 'glossary#_glossary', output, 1 + assert_css 'glossary[xml|id="_glossary"]', output, 1 end # WARNING this test manipulates runtime settings; should probably be run in forked process test 'should force encoding of docinfo files to UTF-8' do - sample_input_path = fixture_path('basic.asciidoc') - - if RUBY_VERSION >= '1.9' - default_external_old = Encoding.default_external - force_encoding_old = Asciidoctor::FORCE_ENCODING - verbose_old = $VERBOSE - end + old_external = Encoding.default_external + old_internal = Encoding.default_internal + old_verbose = $VERBOSE begin - if RUBY_VERSION >= '1.9' - $VERBOSE = nil # disable warnings since we have to modify constants - Encoding.default_external = 'US-ASCII' - Asciidoctor::FORCE_ENCODING = true - end - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} - assert !output.empty? 
+ $VERBOSE = nil # disable warnings since we have to modify constants + Encoding.default_external = Encoding.default_internal = Encoding::IBM437 + sample_input_path = fixture_path('basic.adoc') + output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, + backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => 'private,shared' } + refute_empty output assert_css 'productname', output, 1 + assert_includes output, 'Asciidoctor™' assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 1 ensure - if RUBY_VERSION >= '1.9' - Encoding.default_external = default_external_old - Asciidoctor::FORCE_ENCODING = force_encoding_old - $VERBOSE = verbose_old - end + Encoding.default_external = old_external + Encoding.default_internal = old_internal + $VERBOSE = old_verbose end end test 'should not include docinfo files by default' do - sample_input_path = fixture_path('basic.asciidoc') + sample_input_path = fixture_path('basic.adoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, safe: Asciidoctor::SafeMode::SERVER + refute_empty output assert_css 'script[src="modernizr.js"]', output, 0 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER + refute_empty output assert_css 'productname', output, 0 assert_css 'copyright', output, 0 end test 'should not include docinfo files if safe mode is SECURE or greater' do - sample_input_path = fixture_path('basic.asciidoc') + sample_input_path = fixture_path('basic.adoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :attributes => {'docinfo2' => ''} - assert !output.empty? + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, attributes: { 'docinfo2' => '' } + refute_empty output assert_css 'script[src="modernizr.js"]', output, 0 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :backend => 'docbook', :attributes => {'docinfo2' => ''} - assert !output.empty? 
+ output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, backend: 'docbook', attributes: { 'docinfo2' => '' } + refute_empty output assert_css 'productname', output, 0 assert_css 'copyright', output, 0 end - test 'should apply explicit substitutions to docinfo files' do - sample_input_path = fixture_path('subs.adoc') + test 'should substitute attributes in docinfo files by default' do + sample_input_path = fixture_path 'subs.adoc' + using_memory_logger do |logger| + output = Asciidoctor.convert_file sample_input_path, + to_file: false, + standalone: true, + safe: :server, + attributes: { 'docinfo' => '', 'bootstrap-version' => nil, 'linkcss' => '', 'attribute-missing' => 'drop-line' } + refute_empty output + assert_css 'script', output, 0 + assert_xpath %(//meta[@name="copyright"][@content="(C) OpenDevise"]), output, 1 + assert_message logger, :INFO, 'dropping line containing reference to missing attribute: bootstrap-version' + end + end - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => '', 'docinfosubs' => 'attributes,replacements', 'linkcss' => ''} - assert !output.empty? + test 'should apply explicit substitutions to docinfo files' do + sample_input_path = fixture_path 'subs.adoc' + output = Asciidoctor.convert_file sample_input_path, + to_file: false, + standalone: true, + safe: :server, + attributes: { 'docinfo' => '', 'docinfosubs' => 'attributes,replacements', 'linkcss' => '' } + refute_empty output assert_css 'script[src="bootstrap.3.2.0.min.js"]', output, 1 - assert_xpath %(//meta[@name="copyright"][@content="#{entity 169} OpenDevise"]), output, 1 + assert_xpath %(//meta[@name="copyright"][@content="#{decode_char 169} OpenDevise"]), output, 1 end end context 'MathJax' do test 'should add MathJax script to HTML head if stem attribute is set' do - output = render_string '', :attributes => {'stem' => ''} + output = convert_string '', attributes: { 'stem' => '' } assert_match('), {}, :content_model => :raw + create_pass_block parent, %(), {}, content_model: :raw + end +end + +class LegacyPosAttrsBlockMacro < Asciidoctor::Extensions::BlockMacroProcessor + option :pos_attrs, ['target', 'format'] + def process parent, _, attrs + create_image_block parent, { 'target' => %(#{attrs['target']}.#{attrs['format']}) } end end class TemperatureMacro < Asciidoctor::Extensions::InlineMacroProcessor; use_dsl named :degrees - name_attributes 'units' + resolve_attributes '1:units', 'precision=1' def process parent, target, attributes units = attributes['units'] || (parent.document.attr 'temperature-unit', 'C') + precision = attributes['precision'].to_i c = target.to_f case units when 'C' - %(#{c} °C) + create_inline parent, :quoted, %(#{c.round precision} °C), type: :unquoted when 'F' - %(#{c * 1.8 + 32 } °F) + create_inline parent, :quoted, %(#{(c * 1.8 + 32).round precision} °F), type: :unquoted else - c + raise ::ArgumentError, %(Unknown temperature units: #{units}) end end end @@ -123,8 +166,7 @@ end end -class MetaAppDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor - use_dsl +class MetaAppDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor; use_dsl at_location :head def process document @@ -139,8 +181,65 @@ end end +def create_cat_in_sink_block_macro + Asciidoctor::Extensions.create do + block_macro do + named :cat_in_sink + process do |parent, target, attrs| + image_attrs = {} + unless target.nil_or_empty? 
+ image_attrs['target'] = %(cat-in-sink-day-#{target}.png) + end + if (title = attrs.delete 'title') + image_attrs['title'] = title + end + if (alt = attrs.delete 1) + image_attrs['alt'] = alt + end + create_image_block parent, image_attrs + end + end + end +end + +def create_santa_list_block_macro + Asciidoctor::Extensions.create do + block_macro do + named :santa_list + process do |parent, target| + list = create_list parent, target + guillaume = (create_list_item list, 'Guillaume') + guillaume.add_role('friendly') + guillaume.id = 'santa-list-guillaume' + list << guillaume + robert = (create_list_item list, 'Robert') + robert.add_role('kind') + robert.add_role('contributor') + robert.add_role('java') + list << robert + pepijn = (create_list_item list, 'Pepijn') + pepijn.id = 'santa-list-pepijn' + list << pepijn + dan = (create_list_item list, 'Dan') + dan.add_role('naughty') + dan.id = 'santa-list-dan' + list << dan + sarah = (create_list_item list, 'Sarah') + list << sarah + list + end + end + end +end + context 'Extensions' do context 'Register' do + test 'should not activate registry if no extension groups are registered' do + assert defined? Asciidoctor::Extensions + doc = empty_document + refute doc.extensions?, 'Extensions should not be enabled if not groups are registered' + end + test 'should register extension group class' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup @@ -179,7 +278,7 @@ Asciidoctor::Extensions.register :sample, SampleExtensionGroup.new refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size - assert Asciidoctor::Extensions.groups[:sample].is_a? SampleExtensionGroup + assert_kind_of SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end @@ -187,53 +286,95 @@ test 'should register extension block' do begin - Asciidoctor::Extensions.register(:sample) do + Asciidoctor::Extensions.register :sample do end refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size - assert Asciidoctor::Extensions.groups[:sample].is_a? 
Proc + assert_kind_of Proc, Asciidoctor::Extensions.groups[:sample] + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should coerce group name to symbol when registering' do + begin + Asciidoctor::Extensions.register 'sample', SampleExtensionGroup + refute_nil Asciidoctor::Extensions.groups + assert_equal 1, Asciidoctor::Extensions.groups.size + assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end - test 'should get class for top-level class name' do - clazz = Asciidoctor::Extensions.class_for_name('Asciidoctor') - refute_nil clazz - assert_equal Asciidoctor, clazz + test 'should unregister extension group by symbol name' do + begin + Asciidoctor::Extensions.register :sample, SampleExtensionGroup + refute_nil Asciidoctor::Extensions.groups + assert_equal 1, Asciidoctor::Extensions.groups.size + Asciidoctor::Extensions.unregister :sample + assert_equal 0, Asciidoctor::Extensions.groups.size + ensure + Asciidoctor::Extensions.unregister_all + end end - test 'should get class for class name in module' do - clazz = Asciidoctor::Extensions.class_for_name('Asciidoctor::Extensions') - refute_nil clazz - assert_equal Asciidoctor::Extensions, clazz + test 'should unregister extension group by string name' do + begin + Asciidoctor::Extensions.register :sample, SampleExtensionGroup + refute_nil Asciidoctor::Extensions.groups + assert_equal 1, Asciidoctor::Extensions.groups.size + Asciidoctor::Extensions.unregister 'sample' + assert_equal 0, Asciidoctor::Extensions.groups.size + ensure + Asciidoctor::Extensions.unregister_all + end end - test 'should get class for class name resolved from root' do - clazz = Asciidoctor::Extensions.class_for_name('::Asciidoctor::Extensions') - refute_nil clazz - assert_equal Asciidoctor::Extensions, clazz + test 'should unregister multiple extension groups by name' do + begin + Asciidoctor::Extensions.register :sample1, SampleExtensionGroup + Asciidoctor::Extensions.register :sample2, SampleExtensionGroup + refute_nil Asciidoctor::Extensions.groups + assert_equal 2, Asciidoctor::Extensions.groups.size + Asciidoctor::Extensions.unregister :sample1, :sample2 + assert_equal 0, Asciidoctor::Extensions.groups.size + ensure + Asciidoctor::Extensions.unregister_all + end end - test 'should raise exception if cannot find class for name' do + test 'should raise NameError if extension class cannot be resolved from string' do begin - Asciidoctor::Extensions.class_for_name('InvalidModule::InvalidClass') + Asciidoctor::Extensions.register do + block 'foobar' + end + empty_document flunk 'Expecting RuntimeError to be raised' - rescue RuntimeError => e - assert_equal 'Could not resolve class for name: InvalidModule::InvalidClass', e.message + rescue NameError => e + assert_equal 'Could not resolve class for name: foobar', e.message + ensure + Asciidoctor::Extensions.unregister_all end end - test 'should resolve class if class is given' do - clazz = Asciidoctor::Extensions.resolve_class(Asciidoctor::Extensions) - refute_nil clazz - assert_equal Asciidoctor::Extensions, clazz - end + test 'should allow standalone registry to be created but not registered' do + registry = Asciidoctor::Extensions.create 'sample' do + block do + named :whisper + on_context :paragraph + parse_content_as :simple + def process parent, reader, attributes + create_paragraph parent, reader.lines.map(&:downcase), attributes + end + end + end - test 'should resolve class if class from string' do - clazz = 
Asciidoctor::Extensions.resolve_class('Asciidoctor::Extensions') - refute_nil clazz - assert_equal Asciidoctor::Extensions, clazz + assert_instance_of Asciidoctor::Extensions::Registry, registry + refute_nil registry.groups + assert_equal 1, registry.groups.size + assert_equal 'sample', registry.groups.keys.first + assert_equal 0, Asciidoctor::Extensions.groups.size end end @@ -272,7 +413,7 @@ SampleExtensionGroup.register doc = Asciidoctor::Document.new assert doc.extensions? - assert doc.extensions.is_a? Asciidoctor::Extensions::Registry + assert_kind_of Asciidoctor::Extensions::Registry, doc.extensions ensure Asciidoctor::Extensions.unregister_all end @@ -288,9 +429,9 @@ assert registry.preprocessors? extensions = registry.preprocessors assert_equal 1, extensions.size - assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extensions.first.instance.is_a? SamplePreprocessor - assert extensions.first.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first + assert_kind_of SamplePreprocessor, extensions.first.instance + assert_kind_of Method, extensions.first.process_method end test 'should instantiate include processors' do @@ -300,9 +441,9 @@ assert registry.include_processors? extensions = registry.include_processors assert_equal 1, extensions.size - assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extensions.first.instance.is_a? SampleIncludeProcessor - assert extensions.first.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first + assert_kind_of SampleIncludeProcessor, extensions.first.instance + assert_kind_of Method, extensions.first.process_method end test 'should instantiate docinfo processors' do @@ -313,21 +454,22 @@ assert registry.docinfo_processors?(:head) extensions = registry.docinfo_processors assert_equal 1, extensions.size - assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extensions.first.instance.is_a? SampleDocinfoProcessor - assert extensions.first.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first + assert_kind_of SampleDocinfoProcessor, extensions.first.instance + assert_kind_of Method, extensions.first.process_method end - test 'should instantiate treeprocessors' do + # NOTE intentionally using the legacy names + test 'should instantiate tree processors' do registry = Asciidoctor::Extensions::Registry.new registry.treeprocessor SampleTreeprocessor registry.activate Asciidoctor::Document.new assert registry.treeprocessors? extensions = registry.treeprocessors assert_equal 1, extensions.size - assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extensions.first.instance.is_a? SampleTreeprocessor - assert extensions.first.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first + assert_kind_of SampleTreeprocessor, extensions.first.instance + assert_kind_of Method, extensions.first.process_method end test 'should instantiate postprocessors' do @@ -337,9 +479,9 @@ assert registry.postprocessors? extensions = registry.postprocessors assert_equal 1, extensions.size - assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extensions.first.instance.is_a? SamplePostprocessor - assert extensions.first.process_method.is_a? 
::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first + assert_kind_of SamplePostprocessor, extensions.first.instance + assert_kind_of Method, extensions.first.process_method end test 'should instantiate block processor' do @@ -349,16 +491,16 @@ assert registry.blocks? assert registry.registered_for_block? :sample, :paragraph extension = registry.find_block_extension :sample - assert extension.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extension.instance.is_a? SampleBlock - assert extension.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extension + assert_kind_of SampleBlock, extension.instance + assert_kind_of Method, extension.process_method end test 'should not match block processor for unsupported context' do registry = Asciidoctor::Extensions::Registry.new registry.block SampleBlock, :sample registry.activate Asciidoctor::Document.new - assert !(registry.registered_for_block? :sample, :sidebar) + refute registry.registered_for_block? :sample, :sidebar end test 'should instantiate block macro processor' do @@ -368,9 +510,9 @@ assert registry.block_macros? assert registry.registered_for_block_macro? 'sample' extension = registry.find_block_macro_extension 'sample' - assert extension.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extension.instance.is_a? SampleBlockMacro - assert extension.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extension + assert_kind_of SampleBlockMacro, extension.instance + assert_kind_of Method, extension.process_method end test 'should instantiate inline macro processor' do @@ -380,9 +522,9 @@ assert registry.inline_macros? assert registry.registered_for_inline_macro? 'sample' extension = registry.find_inline_macro_extension 'sample' - assert extension.is_a? Asciidoctor::Extensions::ProcessorExtension - assert extension.instance.is_a? SampleInlineMacro - assert extension.process_method.is_a? ::Method + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extension + assert_kind_of SampleInlineMacro, extension.instance + assert_kind_of Method, extension.process_method end test 'should allow processors to be registered by a string name' do @@ -392,18 +534,53 @@ assert registry.preprocessors? extensions = registry.preprocessors assert_equal 1, extensions.size - assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension + assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first end end context 'Integration' do + test 'can provide extension registry as an option' do + registry = Asciidoctor::Extensions.create do + tree_processor SampleTreeProcessor + end + + doc = document_from_string %(= Document Title\n\ncontent), extension_registry: registry + refute_nil doc.extensions + assert_equal 1, doc.extensions.groups.size + assert doc.extensions.tree_processors? + assert_equal 1, doc.extensions.tree_processors.size + assert_equal 0, Asciidoctor::Extensions.groups.size + end + + # NOTE I'm not convinced we want to continue to support this use case + test 'can provide extension registry created without any groups as option' do + registry = Asciidoctor::Extensions.create + registry.tree_processor SampleTreeProcessor + + doc = document_from_string %(= Document Title\n\ncontent), extension_registry: registry + refute_nil doc.extensions + assert_equal 0, doc.extensions.groups.size + assert doc.extensions.tree_processors? 
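
The integration tests in this hunk supply extensions per document rather than through the global registry. A rough sketch of that pattern, assuming a throwaway tree processor that merely appends a paragraph (the processor body and input text are illustrative, not from the suite):

  require 'asciidoctor'

  registry = Asciidoctor::Extensions.create do
    tree_processor do
      process do |doc|
        # append one paragraph to the parsed tree; returning nil leaves the document in place
        doc << (create_paragraph doc, 'added by a tree processor', {})
        nil
      end
    end
  end

  puts Asciidoctor.convert %(= Title\n\ncontent), extension_registry: registry
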
+ assert_equal 1, doc.extensions.tree_processors.size + assert_equal 0, Asciidoctor::Extensions.groups.size + end + + test 'can provide extensions proc as option' do + doc = document_from_string %(= Document Title\n\ncontent), extensions: proc { tree_processor SampleTreeProcessor } + refute_nil doc.extensions + assert_equal 1, doc.extensions.groups.size + assert doc.extensions.tree_processors? + assert_equal 1, doc.extensions.tree_processors.size + assert_equal 0, Asciidoctor::Extensions.groups.size + end + test 'should invoke preprocessors before parsing document' do - input = <<-EOS -junk line + input = <<~'EOS' + junk line -= Document Title + = Document Title -sample content + sample content EOS begin @@ -421,13 +598,13 @@ end end - test 'should invoke include processor to process include macro' do - input = <<-EOS -before + test 'should invoke include processor to process include directive' do + input = <<~'EOS' + before -include::lorem-ipsum.txt[] + include::lorem-ipsum.txt[] -after + after EOS begin @@ -435,60 +612,86 @@ include_processor BoilerplateTextIncludeProcessor end - result = render_string input, :safe => :server + # a custom include processor is not affected by the safe mode + result = convert_string input, safe: :secure assert_css '.paragraph > p', result, 3 - assert result.include?('before') - assert result.include?('Lorem ipsum') - assert result.include?('after') + assert_includes result, 'before' + assert_includes result, 'Lorem ipsum' + assert_includes result, 'after' ensure Asciidoctor::Extensions.unregister_all end end - test 'should call include processor to process include directive' do - input = <<-EOS -first line + test 'should invoke include processor if it offers to handle include directive' do + input = <<~'EOS' + include::skip-me.adoc[] + line after skip + + include::include-file.adoc[] -include::include-file.asciidoc[] + include::fixtures/grandchild-include.adoc[] -last line + last line EOS - # Safe Mode is not required here - document = empty_document :base_dir => File.expand_path(File.dirname(__FILE__)) - document.extensions.include_processor do - process do |doc, reader, target, attributes| - # demonstrate that push_include normalizes endlines - content = ["include target:: #{target}\n", "\n", "middle line\n"] - reader.push_include content, target, target, 1, attributes + registry = Asciidoctor::Extensions.create do + include_processor do + handles? do |target| + target == 'skip-me.adoc' + end + + process do |doc, reader, target, attributes| + end + end + + include_processor do + handles? 
do |target| + target == 'include-file.adoc' + end + + process do |doc, reader, target, attributes| + # demonstrates that push_include normalizes newlines + content = [ + %(found include target '#{target}' at line #{reader.cursor_at_prev_line.lineno}\r\n), + %(\r\n), + %(middle line\r\n) + ] + reader.push_include content, target, target, 1, attributes + end end end - reader = Asciidoctor::PreprocessorReader.new document, input + # safe mode only required for built-in include processor + document = empty_document base_dir: testdir, extension_registry: registry, safe: :safe + reader = Asciidoctor::PreprocessorReader.new document, input, nil, normalize: true lines = [] lines << reader.read_line + assert_equal 'line after skip', lines.last lines << reader.read_line lines << reader.read_line - assert_equal 'include target:: include-file.asciidoc', lines.last - assert_equal 'include-file.asciidoc: line 2', reader.line_info + assert_equal 'found include target \'include-file.adoc\' at line 4', lines.last + assert_equal 'include-file.adoc: line 2', reader.line_info while reader.has_more_lines? lines << reader.read_line end - source = lines * ::Asciidoctor::EOL - assert_match(/^include target:: include-file.asciidoc$/, source) + source = lines * ::Asciidoctor::LF + assert_match(/^found include target 'include-file.adoc' at line 4$/, source) assert_match(/^middle line$/, source) + assert_match(/^last line of grandchild$/, source) + assert_match(/^last line$/, source) end - test 'should invoke treeprocessors after parsing document' do - input = <<-EOS -= Document Title -Doc Writer + test 'should invoke tree processors after parsing document' do + input = <<~'EOS' + = Document Title + Doc Writer -content + content EOS begin Asciidoctor::Extensions.register do - treeprocessor ReplaceAuthorTreeprocessor + tree_processor ReplaceAuthorTreeProcessor end doc = document_from_string input @@ -498,17 +701,36 @@ end end - test 'should allow treeprocessor to replace tree' do - input = <<-EOS -= Original Document -Doc Writer + test 'should set source_location on document before invoking tree processors' do + begin + Asciidoctor::Extensions.register do + tree_processor do + process do |doc| + para = create_paragraph doc.blocks.last.parent, %(file: #{doc.file}, lineno: #{doc.lineno}), {} + doc << para + end + end + end + + sample_doc = fixture_path 'sample.adoc' + doc = Asciidoctor.load_file sample_doc, sourcemap: true + assert_includes doc.convert, 'file: sample.adoc, lineno: 1' + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should allow tree processor to replace tree' do + input = <<~'EOS' + = Original Document + Doc Writer -content + content EOS begin Asciidoctor::Extensions.register do - treeprocessor ReplaceTreeTreeprocessor + tree_processor ReplaceTreeTreeProcessor end doc = document_from_string input @@ -518,11 +740,84 @@ end end - test 'should invoke postprocessors after rendering document' do - input = <<-EOS -* one -* two -* three + test 'should honor block title assigned in tree processor' do + input = <<~'EOS' + = Document Title + :!example-caption: + + .Old block title + ==== + example block content + ==== + EOS + + old_title = nil + begin + Asciidoctor::Extensions.register do + tree_processor do + process do |doc| + ex = (doc.find_by context: :example)[0] + old_title = ex.title + ex.title = 'New block title' + end + end + end + + doc = document_from_string input + assert_equal 'Old block title', old_title + assert_equal 'New block title', (doc.find_by context: 
:example)[0].title + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should be able to register preferred tree processor' do + begin + Asciidoctor::Extensions.register do + tree_processor do + process do |doc| + doc << (create_paragraph doc, 'd', {}) + nil + end + end + + tree_processor do + prefer + process do |doc| + doc << (create_paragraph doc, 'c', {}) + nil + end + end + + prefer :tree_processor do + process do |doc| + doc << (create_paragraph doc, 'b', {}) + nil + end + end + + prefer (tree_processor do + process do |doc| + doc << (create_paragraph doc, 'a', {}) + nil + end + end) + + prefer :tree_processor, SelfSigningTreeProcessor + end + + (doc = empty_document).convert + assert_equal %w(SelfSigningTreeProcessor a b c d), doc.blocks.map(&:lines).map(&:first) + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should invoke postprocessors after converting document' do + input = <<~'EOS' + * one + * two + * three EOS begin @@ -530,246 +825,1019 @@ postprocessor StripAttributesPostprocessor end - output = render_string input + output = convert_string input refute_match(/
    /, output) ensure Asciidoctor::Extensions.unregister_all end end - test 'should invoke processor for custom block' do - input = <<-EOS -[yell] -Hi there! + test 'should yield to document processor block if block has non-zero arity' do + input = <<~'EOS' + hi! EOS begin Asciidoctor::Extensions.register do - block UppercaseBlock + tree_processor do |processor| + processor.process do |doc| + doc << (create_paragraph doc, 'bye!', {}) + end + end end - output = render_embedded_string input - assert_xpath '//p', output, 1 - assert_xpath '//p[text()="HI THERE!"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '//p', output, 2 + assert_xpath '//p[text()="hi!"]', output, 1 + assert_xpath '//p[text()="bye!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end - test 'should invoke processor for custom block macro' do - input = <<-EOS -snippet::12345[] + test 'should invoke processor for custom block' do + input = <<~'EOS' + [yell] + Hi there! + + [yell,chars=aeiou] + Hi there! EOS begin Asciidoctor::Extensions.register do - block_macro SnippetMacro, :snippet + block UppercaseBlock end - output = render_embedded_string input - assert output.include?('') + output = convert_string_to_embedded input + assert_xpath '//p', output, 2 + assert_xpath '(//p)[1][text()="HI THERE!"]', output, 1 + assert_xpath '(//p)[2][text()="hI thErE!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end - test 'should invoke processor for custom inline macro' do + test 'should invoke processor for custom block in an AsciiDoc table cell' do + input = <<~'EOS' + |=== + a| + [yell] + Hi there! + |=== + EOS + begin Asciidoctor::Extensions.register do - inline_macro TemperatureMacro, :degrees + block UppercaseBlock end - output = render_embedded_string 'Room temperature is degrees:25[C].', :attributes => {'temperature-unit' => 'F'} - assert output.include?('Room temperature is 25.0 °C.') - - output = render_embedded_string 'Room temperature is degrees:25[].', :attributes => {'temperature-unit' => 'F'} - assert output.include?('Room temperature is 77.0 °F.') + output = convert_string_to_embedded input + assert_xpath '/table//p', output, 1 + assert_xpath '/table//p[text()="HI THERE!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end - test 'should resolve regexp for inline macro lazily' do + test 'should yield to syntax processor block if block has non-zero arity' do + input = <<~'EOS' + [eval] + .... + 'yolo' * 5 + .... 
+ EOS + begin Asciidoctor::Extensions.register do - inline_macro do - named :label - using_format :short - process do |parent, target| - %() + block :eval do |processor| + processor.on_context :literal + processor.process do |parent, reader, attrs| + create_paragraph parent, (eval reader.read_lines[0]), {} end end end - output = render_embedded_string 'label:[Checkbox]' - assert output.include?('') + output = convert_string_to_embedded input + assert_xpath '//p[text()="yoloyoloyoloyoloyolo"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end - test 'should not carry over attributes if block processor returns nil' do + test 'should pass cloaked context in attributes passed to process method of custom block' do + input = <<~'EOS' + [custom] + **** + sidebar + **** + EOS + + cloaked_context = nil begin Asciidoctor::Extensions.register do - block do - named :skip - on_context :paragraph - parse_content_as :raw - process do |parent, reader, attrs| + block :custom do + on_context :sidebar + process do |doc, reader, attrs| + cloaked_context = attrs['cloaked-context'] nil end end end - input = <<-EOS -.unused title -[skip] -not rendered - --- -rendered --- - EOS - doc = document_from_string input - assert_equal 1, doc.blocks.size - assert_nil doc.blocks[0].attributes['title'] + + convert_string_to_embedded input + assert_equal :sidebar, cloaked_context ensure Asciidoctor::Extensions.unregister_all end end - test 'should pass attributes by value to block processor' do + test 'should invoke processor for custom block macro' do + input = 'snippet::12345[mode=edit]' + begin Asciidoctor::Extensions.register do - block do - named :foo - on_context :paragraph - parse_content_as :raw - process do |parent, reader, attrs| - original_attrs = attrs.dup - attrs.delete('title') - create_paragraph parent, reader.read_lines, original_attrs.merge('id' => 'value') - end - end + block_macro SnippetMacro, :snippet end - input = <<-EOS -.title -[foo] -content - EOS - doc = document_from_string input - assert_equal 1, doc.blocks.size - assert_equal 'title', doc.blocks[0].attributes['title'] - assert_equal 'value', doc.blocks[0].id + + output = convert_string_to_embedded input + assert_includes output, '' ensure Asciidoctor::Extensions.unregister_all end end - test 'parse_content should not share attributes between parsed blocks' do + test 'should substitute attributes in target of custom block macro' do + input = 'snippet::{gist-id}[mode=edit]' + begin Asciidoctor::Extensions.register do - block do - named :wrap - on_context :open - process do |parent, reader, attrs| - wrap = create_open_block parent, nil, attrs - parse_content wrap, reader.read_lines - end - end + block_macro SnippetMacro, :snippet end - input = <<-EOS -[wrap] --- -[foo=bar] -==== -content -==== - -[baz=qux] -==== -content -==== --- - EOS - doc = document_from_string input - assert_equal 1, doc.blocks.size - wrap = doc.blocks[0] - assert_equal 2, wrap.blocks.size - assert_equal 2, wrap.blocks[0].attributes.size - assert_equal 2, wrap.blocks[1].attributes.size - assert_nil wrap.blocks[1].attributes['foo'] + + output = convert_string_to_embedded input, attributes: { 'gist-id' => '12345' } + assert_includes output, '' ensure Asciidoctor::Extensions.unregister_all end end - test 'should add docinfo to document' do - input = <<-EOS -= Document Title + test 'should log debug message if custom block macro is unknown' do + input = 'unknown::[]' + using_memory_logger Logger::Severity::DEBUG do |logger| + convert_string_to_embedded input + 
assert_message logger, :DEBUG, ': line 1: unknown name for block macro: unknown', Hash + end + end -sample content + test 'should drop block macro line if target references missing attribute and attribute-missing is drop-line' do + input = <<~'EOS' + [.rolename] + snippet::{gist-ns}12345[mode=edit] + + following paragraph EOS begin Asciidoctor::Extensions.register do - docinfo_processor MetaRobotsDocinfoProcessor + block_macro SnippetMacro, :snippet end - doc = document_from_string input, :safe => :server - assert_equal '', doc.docinfo + doc, output = nil, nil + using_memory_logger do |logger| + doc = document_from_string input, attributes: { 'attribute-missing' => 'drop-line' } + assert_equal 1, doc.blocks.size + assert_equal :paragraph, doc.blocks[0].context + output = doc.convert + assert_message logger, :INFO, 'dropping line containing reference to missing attribute: gist-ns' + end + assert_css '.paragraph', output, 1 + assert_css '.rolename', output, 0 ensure Asciidoctor::Extensions.unregister_all end end + test 'should invoke processor for custom block macro in an AsciiDoc table cell' do + input = <<~'EOS' + |=== + a|message::hi[] + |=== + EOS - test 'should add multiple docinfo to document' do - input = <<-EOS -= Document Title + begin + Asciidoctor::Extensions.register do + block_macro :message do + process do |parent, target, attrs| + create_paragraph parent, target.upcase, {} + end + end + end -sample content - EOS + output = convert_string_to_embedded input + assert_xpath '/table//p[text()="HI"]', output, 1 + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should match short form of block macro' do + input = 'custom-toc::[]' + + resolved_target = nil begin Asciidoctor::Extensions.register do - docinfo_processor MetaAppDocinfoProcessor - docinfo_processor MetaRobotsDocinfoProcessor, :position => :>> - docinfo_processor do - at_location :footer - process do |doc| - '' + block_macro do + named 'custom-toc' + process do |parent, target, attrs| + resolved_target = target + create_pass_block parent, '', {}, content_model: :raw end end end - doc = document_from_string input, :safe => :server - assert_equal ' -', doc.docinfo - assert_equal '', doc.docinfo(:footer) + output = convert_string_to_embedded input + assert_equal '', output + assert_equal '', resolved_target ensure Asciidoctor::Extensions.unregister_all end end + test 'should fail to convert if name of block macro is illegal' do + input = 'illegal name::target[]' - test 'should append docinfo to document' do begin Asciidoctor::Extensions.register do - docinfo_processor MetaRobotsDocinfoProcessor + block_macro do + named 'illegal name' + process do |parent, target, attrs| + nil + end + end end - sample_input_path = fixture_path('basic.asciidoc') - output = Asciidoctor.convert_file sample_input_path, :to_file => false, - :header_footer => true, - :safe => Asciidoctor::SafeMode::SERVER, - :attributes => {'docinfo' => ''} - assert !output.empty? 
- assert_css 'script[src="modernizr.js"]', output, 1 - assert_css 'meta[name="robots"]', output, 1 - assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 + assert_raises ArgumentError do + convert_string_to_embedded input + end + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should honor legacy :pos_attrs option set via static method' do + begin + Asciidoctor::Extensions.register do + block_macro LegacyPosAttrsBlockMacro, :diag + end + + result = convert_string_to_embedded 'diag::[filename,png]' + assert_css 'img[src="filename.png"]', result, 1 ensure Asciidoctor::Extensions.unregister_all end end + + test 'should honor legacy :pos_attrs option set via DSL' do + begin + Asciidoctor::Extensions.register do + block_macro :diag do + option :pos_attrs, ['target', 'format'] + process do |parent, _, attrs| + create_image_block parent, { 'target' => %(#{attrs['target']}.#{attrs['format']}) } + end + end + end + + result = convert_string_to_embedded 'diag::[filename,png]' + assert_css 'img[src="filename.png"]', result, 1 + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should be able to set header attribute in block macro processor' do + begin + Asciidoctor::Extensions.register do + block_macro do + named :attribute + resolves_attributes '1:value' + process do |parent, target, attrs| + parent.document.set_attr target, attrs['value'] + nil + end + end + block_macro do + named :header_attribute + resolves_attributes '1:value' + process do |parent, target, attrs| + parent.document.set_header_attribute target, attrs['value'] + nil + end + end + end + input = <<~'EOS' + attribute::yin[yang] + + header_attribute::foo[bar] + EOS + doc = document_from_string input + assert_nil doc.attr 'yin' + assert_equal 'bar', (doc.attr 'foo') + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should invoke processor for custom inline macro' do + begin + Asciidoctor::Extensions.register do + inline_macro TemperatureMacro, :deg + end + + output = convert_string_to_embedded 'Room temperature is deg:25[C,precision=0].', attributes: { 'temperature-unit' => 'F' } + assert_includes output, 'Room temperature is 25 °C.' + + output = convert_string_to_embedded 'Normal body temperature is deg:37[].', attributes: { 'temperature-unit' => 'F' } + assert_includes output, 'Normal body temperature is 98.6 °F.' 
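
The updated TemperatureMacro assertions above reflect the 2.x inline macro contract: process returns an inline node built with create_inline rather than a raw String. A short usage sketch, assuming the TemperatureMacro class defined earlier in this test file has been loaded:

  require 'asciidoctor'

  Asciidoctor::Extensions.register do
    # register the sample processor under the deg: macro name, as the test does
    inline_macro TemperatureMacro, :deg
  end

  puts Asciidoctor.convert 'Room temperature is deg:25[C,precision=0].',
    attributes: { 'temperature-unit' => 'F' }
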
+ ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should resolve regexp for inline macro lazily' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :label + match_format :short + parse_content_as :text + process do |parent, _, attrs| + create_inline_pass parent, %() + end + end + end + + output = convert_string_to_embedded 'label:[Checkbox]' + assert_includes output, '' + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should map unparsed attrlist to target when format is short' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :label + match_format :short + process do |parent, target| + create_inline_pass parent, %() + end + end + end + + output = convert_string_to_embedded 'label:[Checkbox]' + assert_includes output, '' + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should parse text in square brackets as attrlist by default' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :json + match_format :short + process do |parent, _, attrs| + create_inline_pass parent, %({ #{attrs.map {|k, v| %["#{k}": "#{v}"] }.join ', '} }) + end + end + + inline_macro do + named :data + process do |parent, target, attrs| + if target == 'json' + create_inline_pass parent, %({ #{attrs.map {|k, v| %["#{k}": "#{v}"] }.join ', '} }) + else + nil + end + end + end + end + + output = convert_string_to_embedded 'json:[a=A,b=B,c=C]', doctype: :inline + assert_equal '{ "a": "A", "b": "B", "c": "C" }', output + output = convert_string_to_embedded 'data:json[a=A,b=B,c=C]', doctype: :inline + assert_equal '{ "a": "A", "b": "B", "c": "C" }', output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should assign captures correctly for inline macros' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :short_attributes + match_format :short + resolve_attributes '1:name' + process do |parent, target, attrs| + create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) + end + end + + inline_macro do + named :short_text + match_format :short + resolve_attributes false + process do |parent, target, attrs| + create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) + end + end + + inline_macro do + named :'full-attributes' + resolve_attributes '1:name' => nil + process do |parent, target, attrs| + create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) + end + end + + inline_macro do + named :'full-text' + resolve_attributes false + process do |parent, target, attrs| + create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) + end + end + + inline_macro do + named :@short_match + match %r/@(\w+)/ + resolve_attributes false + process do |parent, target, attrs| + create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) + end + end + end + + input = <<~'EOS' + [subs=normal] + ++++ + short_attributes:[] + short_attributes:[value,key=val] + short_text:[] + short_text:[[text\]] + full-attributes:target[] + full-attributes:target[value,key=val] + full-text:target[] + full-text:target[[text\]] + @target + ++++ + EOS + expected = <<~'EOS'.chop + target="", attributes={} + target="value,key=val", attributes={1=>"value", "key"=>"val", "name"=>"value"} + target="", attributes={"text"=>""} + target="[text]", 
attributes={"text"=>"[text]"} + target="target", attributes={} + target="target", attributes={1=>"value", "key"=>"val", "name"=>"value"} + target="target", attributes={"text"=>""} + target="target", attributes={"text"=>"[text]"} + target="target", attributes={} + EOS + output = convert_string_to_embedded input + assert_equal expected, output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should invoke convert on return value if value is an inline node' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :mention + resolve_attributes false + process do |parent, target, attrs| + if (text = attrs['text']).empty? + text = %(@#{target}) + end + create_anchor parent, text, type: :link, target: %(https://github.com/#{target}) + end + end + end + + output = convert_string_to_embedded 'mention:mojavelinux[Dan]' + assert_includes output, 'Dan' + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should allow return value of inline macro to be nil' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :skipme + match_format :short + process do + nil + end + end + end + + using_memory_logger do |logger| + output = convert_string_to_embedded '-skipme:[]-', doctype: :inline + assert_equal '--', output + assert_empty logger + end + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should warn if return value of inline macro is a string' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :say + process do |parent, target, attrs| + target + end + end + end + + using_memory_logger do |logger| + output = convert_string_to_embedded 'say:yo[]', doctype: :inline + assert_equal 'yo', output + assert_message logger, :INFO, 'expected substitution value for custom inline macro to be of type Inline; got String: say:yo[]' + end + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should not apply subs to inline node returned by process method by default' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :say + process do |parent, target, attrs| + create_inline parent, :quoted, %(*#{target}*), type: :emphasis + end + end + end + + output = convert_string_to_embedded 'say:yo[]', doctype: :inline + assert_equal '*yo*', output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should apply specified subs to inline node returned by process method' do + begin + Asciidoctor::Extensions.register do + inline_macro do + named :say + process do |parent, target, attrs| + create_inline_pass parent, %(*#{target}*), attributes: { 'subs' => :normal } + end + end + end + + output = convert_string_to_embedded 'say:yo[]', doctype: :inline + assert_equal 'yo', output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should prefer attributes parsed from inline macro over default attributes' do + begin + Asciidoctor::Extensions.register do + inline_macro :attrs do + match_format :short + default_attributes 1 => 'a', 2 => 'b', 'foo' => 'baz' + positional_attributes 'a', 'b' + process do |parent, _, attrs| + create_inline_pass parent, %(a=#{attrs['a']},2=#{attrs[2]},b=#{attrs['b'] || 'nil'},foo=#{attrs['foo']}) + end + end + end + + output = convert_string_to_embedded 'attrs:[A,foo=bar]', doctype: :inline + # note that default attributes aren't considered when mapping positional attributes + assert_equal 'a=A,2=b,b=nil,foo=bar', output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should not carry 
over attributes if block processor returns nil' do + begin + Asciidoctor::Extensions.register do + block do + named 'skip-me' + on_context :paragraph + parse_content_as :raw + process do |parent, reader, attrs| + nil + end + end + end + input = <<~'EOS' + .unused title + [skip-me] + not shown + + -- + shown + -- + EOS + doc = document_from_string input + assert_equal 1, doc.blocks.size + assert_nil doc.blocks[0].attributes['title'] + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should not invoke process method or carry over attributes if block processor declares skip content model' do + begin + process_method_called = false + Asciidoctor::Extensions.register do + block do + named :ignore + on_context :paragraph + parse_content_as :skip + process do |parent, reader, attrs| + process_method_called = true + nil + end + end + end + input = <<~'EOS' + .unused title + [ignore] + not shown + + -- + shown + -- + EOS + doc = document_from_string input + refute process_method_called + assert_equal 1, doc.blocks.size + assert_nil doc.blocks[0].attributes['title'] + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should pass attributes by value to block processor' do + begin + Asciidoctor::Extensions.register do + block do + named :foo + on_context :paragraph + parse_content_as :raw + process do |parent, reader, attrs| + original_attrs = attrs.dup + attrs.delete('title') + create_paragraph parent, reader.read_lines, original_attrs.merge('id' => 'value') + end + end + end + input = <<~'EOS' + .title + [foo] + content + EOS + doc = document_from_string input + assert_equal 1, doc.blocks.size + assert_equal 'title', doc.blocks[0].attributes['title'] + assert_equal 'value', doc.blocks[0].id + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'parse_content should not share attributes between parsed blocks' do + begin + Asciidoctor::Extensions.register do + block do + named :wrap + on_context :open + process do |parent, reader, attrs| + wrap = create_open_block parent, nil, attrs + parse_content wrap, reader.read_lines + end + end + end + input = <<~'EOS' + [wrap] + -- + [foo=bar] + ==== + content + ==== + + [baz=qux] + ==== + content + ==== + -- + EOS + doc = document_from_string input + assert_equal 1, doc.blocks.size + wrap = doc.blocks[0] + assert_equal 2, wrap.blocks.size + assert_equal 2, wrap.blocks[0].attributes.size + assert_equal 2, wrap.blocks[1].attributes.size + assert_nil wrap.blocks[1].attributes['foo'] + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'can use parse_attributes to parse attrlist' do + begin + parsed_attrs = nil + Asciidoctor::Extensions.register do + block do + named :attrs + on_context :open + process do |parent, reader, attrs| + parsed_attrs = parse_attributes parent, reader.read_line, positional_attributes: ['a', 'b'] + parsed_attrs.update parse_attributes parent, 'foo={foo}', sub_attributes: true + nil + end + end + end + input = <<~'EOS' + :foo: bar + + [attrs] + -- + a,b,c,key=val + -- + EOS + convert_string_to_embedded input + assert_equal 'a', parsed_attrs['a'] + assert_equal 'b', parsed_attrs['b'] + assert_equal 'val', parsed_attrs['key'] + assert_equal 'bar', parsed_attrs['foo'] + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'create_section should set up all section properties' do + begin + sect = nil + Asciidoctor::Extensions.register do + block_macro do + named :sect + process do |parent, target, attrs| + opts = (level = attrs.delete 'level') ? 
{ level: level.to_i } : {} + attrs['id'] = false if attrs['id'] == 'false' + parent = parent.parent if parent.context == :preamble + sect = create_section parent, 'Section Title', attrs, opts + nil + end + end + end + + input_tpl = <<~'EOS' + = Document Title + :doctype: book + :sectnums: + + sect::[%s] + EOS + + { + '' => ['chapter', 1, false, true, '_section_title'], + 'level=0' => ['part', 0, false, false, '_section_title'], + 'level=0,alt' => ['part', 0, false, true, '_section_title', { 'partnums' => '' }], + 'level=0,style=appendix' => ['appendix', 1, true, true, '_section_title'], + 'style=appendix' => ['appendix', 1, true, true, '_section_title'], + 'style=glossary' => ['glossary', 1, true, false, '_section_title'], + 'style=glossary,alt' => ['glossary', 1, true, :chapter, '_section_title', { 'sectnums' => 'all' }], + 'style=abstract' => ['chapter', 1, false, true, '_section_title'], + 'id=section-title' => ['chapter', 1, false, true, 'section-title'], + 'id=false' => ['chapter', 1, false, true, nil] + }.each do |attrlist, (expect_sectname, expect_level, expect_special, expect_numbered, expect_id, extra_attrs)| + input = input_tpl % attrlist + document_from_string input, safe: :server, attributes: extra_attrs + assert_equal expect_sectname, sect.sectname + assert_equal expect_level, sect.level + assert_equal expect_special, sect.special + assert_equal expect_numbered, sect.numbered + if expect_id + assert_equal expect_id, sect.id + else + assert_nil sect.id + end + end + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should add docinfo to document' do + input = <<~'EOS' + = Document Title + + sample content + EOS + + begin + Asciidoctor::Extensions.register do + docinfo_processor MetaRobotsDocinfoProcessor + end + + doc = document_from_string input + assert_equal Asciidoctor::SafeMode::SECURE, doc.safe + assert_equal '', doc.docinfo + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should add multiple docinfo to document' do + input = <<~'EOS' + = Document Title + + sample content + EOS + + begin + Asciidoctor::Extensions.register do + docinfo_processor MetaAppDocinfoProcessor + docinfo_processor MetaRobotsDocinfoProcessor, position: :>> + docinfo_processor do + at_location :footer + process do |doc| + '' + end + end + end + + doc = document_from_string input, safe: :server + assert_equal %(\n), doc.docinfo + assert_equal '', doc.docinfo(:footer) + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should append docinfo to document' do + begin + Asciidoctor::Extensions.register do + docinfo_processor MetaRobotsDocinfoProcessor + end + sample_input_path = fixture_path('basic.adoc') + + output = Asciidoctor.convert_file sample_input_path, to_file: false, + standalone: true, + safe: Asciidoctor::SafeMode::SERVER, + attributes: { 'docinfo' => '' } + refute_empty output + assert_css 'script[src="modernizr.js"]', output, 1 + assert_css 'meta[name="robots"]', output, 1 + assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should return extension instance after registering' do + begin + exts = [] + Asciidoctor::Extensions.register do + exts.push preprocessor SamplePreprocessor + exts.push include_processor SampleIncludeProcessor + exts.push tree_processor SampleTreeProcessor + exts.push docinfo_processor SampleDocinfoProcessor + exts.push postprocessor SamplePostprocessor + end + empty_document + exts.each do |ext| + assert_kind_of 
Asciidoctor::Extensions::ProcessorExtension, ext + end + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should raise an exception if mandatory target attribute is not provided for image block' do + input = 'cat_in_sink::[]' + exception = assert_raises ArgumentError do + convert_string_to_embedded input, extension_registry: create_cat_in_sink_block_macro + end + assert_match(/target attribute is required/, exception.message) + end + + test 'should assign alt attribute to image block if alt is not provided' do + input = 'cat_in_sink::25[]' + doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro + image = doc.blocks[0] + assert_equal 'cat in sink day 25', (image.attr 'alt') + assert_equal 'cat in sink day 25', (image.attr 'default-alt') + output = doc.convert + assert_includes output, 'cat in sink day 25' + end + + test 'should create an image block if mandatory attributes are provided' do + input = 'cat_in_sink::30[cat in sink (yes)]' + doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro + image = doc.blocks[0] + assert_equal 'cat in sink (yes)', (image.attr 'alt') + refute(image.attr? 'default-alt') + output = doc.convert + assert_includes output, 'cat in sink (yes)' + end + + test 'should not assign caption on image block if title is not set on custom block macro' do + input = 'cat_in_sink::30[]' + doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro + output = doc.convert + assert_xpath '/*[@class="imageblock"]/*[@class="title"]', output, 0 + end + + test 'should assign caption on image block if title is set on custom block macro' do + input = <<~'EOS' + .Cat in Sink? + cat_in_sink::30[] + EOS + doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro + output = doc.convert + assert_xpath '/*[@class="imageblock"]/*[@class="title"][text()="Figure 1. 
Cat in Sink?"]', output, 1 + end + + test 'should not fail if alt attribute is not set on block image node' do + begin + Asciidoctor::Extensions.register do + block_macro :no_alt do + process do |parent, target, attrs| + create_block parent, 'image', nil, { 'target' => 'picture.jpg' } + end + end + end + + output = Asciidoctor.convert 'no_alt::[]' + assert_include '', output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should not fail if alt attribute is not set on inline image node' do + begin + Asciidoctor::Extensions.register do + inline_macro :no_alt do + match_format :short + process do |parent, target, attrs| + create_inline parent, 'image', nil, target: 'picture.jpg' + end + end + end + + output = Asciidoctor.convert 'no_alt:[]' + assert_include '', output + ensure + Asciidoctor::Extensions.unregister_all + end + end + + test 'should assign id and role on list items unordered' do + input = 'santa_list::ulist[]' + doc = document_from_string input, standalone: false, extension_registry: create_santa_list_block_macro + output = doc.convert + assert_xpath '/div[@class="ulist"]/ul/li[@class="friendly"][@id="santa-list-guillaume"]', output, 1 + assert_xpath '/div[@class="ulist"]/ul/li[@class="kind contributor java"]', output, 1 + assert_xpath '/div[@class="ulist"]/ul/li[@class="kind contributor java"][not(@id)]', output, 1 + assert_xpath '/div[@class="ulist"]/ul/li[@id="santa-list-pepijn"][not(@class)]', output, 1 + assert_xpath '/div[@class="ulist"]/ul/li[@id="santa-list-dan"][@class="naughty"]', output, 1 + assert_xpath '/div[@class="ulist"]/ul/li[not(@id)][not(@class)]/p[text()="Sarah"]', output, 1 + end + + test 'should assign id and role on list items ordered' do + input = 'santa_list::olist[]' + doc = document_from_string input, standalone: false, extension_registry: create_santa_list_block_macro + output = doc.convert + assert_xpath '/div[@class="olist"]/ol/li[@class="friendly"][@id="santa-list-guillaume"]', output, 1 + assert_xpath '/div[@class="olist"]/ol/li[@class="kind contributor java"]', output, 1 + assert_xpath '/div[@class="olist"]/ol/li[@class="kind contributor java"][not(@id)]', output, 1 + assert_xpath '/div[@class="olist"]/ol/li[@id="santa-list-pepijn"][not(@class)]', output, 1 + assert_xpath '/div[@class="olist"]/ol/li[@id="santa-list-dan"][@class="naughty"]', output, 1 + assert_xpath '/div[@class="olist"]/ol/li[not(@id)][not(@class)]/p[text()="Sarah"]', output, 1 + end end end diff -Nru asciidoctor-1.5.5/test/fixtures/basic.adoc asciidoctor-2.0.10/test/fixtures/basic.adoc --- asciidoctor-1.5.5/test/fixtures/basic.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/basic.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ += Document Title +Doc Writer +v1.0, 2013-01-01 + +Body content. diff -Nru asciidoctor-1.5.5/test/fixtures/basic.asciidoc asciidoctor-2.0.10/test/fixtures/basic.asciidoc --- asciidoctor-1.5.5/test/fixtures/basic.asciidoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/basic.asciidoc 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -= Document Title -Doc Writer -v1.0, 2013-01-01 - -Body content. 
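
The block macro tests above build real document nodes (image blocks, lists with explicit ids and roles) from extension code. For orientation, a minimal sketch of the same kind of macro written against the public DSL; the shout name and input line are invented for illustration and mirror the :message macro used in the AsciiDoc table cell test:

  require 'asciidoctor'

  Asciidoctor::Extensions.register do
    block_macro :shout do
      process do |parent, target, attrs|
        # build a paragraph node attached to the parent block
        create_paragraph parent, target.upcase, attrs
      end
    end
  end

  puts Asciidoctor.convert 'shout::hello[]'
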
diff -Nru asciidoctor-1.5.5/test/fixtures/basic-docinfo-header.html asciidoctor-2.0.10/test/fixtures/basic-docinfo-header.html --- asciidoctor-1.5.5/test/fixtures/basic-docinfo-header.html 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/basic-docinfo-header.html 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,8 @@ + diff -Nru asciidoctor-1.5.5/test/fixtures/configure-stdin.rb asciidoctor-2.0.10/test/fixtures/configure-stdin.rb --- asciidoctor-1.5.5/test/fixtures/configure-stdin.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/configure-stdin.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,4 @@ +require 'stringio' +io = StringIO.new String.new %(é\n\n#{Encoding.default_external}:#{Encoding.default_internal}), encoding: Encoding::UTF_8 +io.set_encoding Encoding.default_external, Encoding.default_internal +$stdin = io diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/erb/html5/block_paragraph.html.erb asciidoctor-2.0.10/test/fixtures/custom-backends/erb/html5/block_paragraph.html.erb --- asciidoctor-1.5.5/test/fixtures/custom-backends/erb/html5/block_paragraph.html.erb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/erb/html5/block_paragraph.html.erb 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -<%#encoding:UTF-8%> class="<%= ['paragraph',role].compact * ' ' %>"><% + class="<%= ['paragraph',role].compact * ' ' %>"><% if title? %>
  <div class="title"><%= title %></div>
    <% end %> diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/erb/html5/open.html.erb asciidoctor-2.0.10/test/fixtures/custom-backends/erb/html5/open.html.erb --- asciidoctor-1.5.5/test/fixtures/custom-backends/erb/html5/open.html.erb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/erb/html5/open.html.erb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,8 @@ + class="<%= ['openblock',(@style == 'open' ? nil : @style),role].compact * ' ' %>"><% + if title? %> +
  <div class="title"><%= title %></div><%
+end %>
+  <div class="content">
+<%= content %>
+  </div>
+</div>
    diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/haml/docbook45/block_paragraph.xml.haml asciidoctor-2.0.10/test/fixtures/custom-backends/haml/docbook45/block_paragraph.xml.haml --- asciidoctor-1.5.5/test/fixtures/custom-backends/haml/docbook45/block_paragraph.xml.haml 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/haml/docbook45/block_paragraph.xml.haml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -- if title? - %formalpara{:id=>@id, :role=>(attr :role), :xreflabel=>(attr :reftext)} - %title=title - %para=content -- else - %para{:id=>@id, :role=>(attr :role), :xreflabel=>(attr :reftext)}=content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/haml/docbook5/block_paragraph.xml.haml asciidoctor-2.0.10/test/fixtures/custom-backends/haml/docbook5/block_paragraph.xml.haml --- asciidoctor-1.5.5/test/fixtures/custom-backends/haml/docbook5/block_paragraph.xml.haml 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/haml/docbook5/block_paragraph.xml.haml 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,6 @@ +- if title? + %formalpara{'xml:id'=>@id, role: (attr :role), xreflabel: (attr :reftext)} + %title=title + %para=content +- else + %para{'xml:id'=>@id, role: (attr :role), xreflabel: (attr :reftext)}=content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/block_paragraph.html.haml asciidoctor-2.0.10/test/fixtures/custom-backends/haml/html5/block_paragraph.html.haml --- asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/block_paragraph.html.haml 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/haml/html5/block_paragraph.html.haml 2019-08-18 16:11:54.000000000 +0000 @@ -1,3 +1,3 @@ - if title? .title=title -%p{:id=>@id, :class=>(attr 'role')}=content +%p{id: @id, class: (attr 'role')}=content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/block_sidebar.html.haml asciidoctor-2.0.10/test/fixtures/custom-backends/haml/html5/block_sidebar.html.haml --- asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/block_sidebar.html.haml 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/haml/html5/block_sidebar.html.haml 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -%aside{:id=>@id, :class=>(attr 'role')} +%aside{id: @id, class: (attr 'role')} - if title? %header %h1=title diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5-tweaks/embedded.html.haml asciidoctor-2.0.10/test/fixtures/custom-backends/haml/html5-tweaks/embedded.html.haml --- asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5-tweaks/embedded.html.haml 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/haml/html5-tweaks/embedded.html.haml 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1 @@ +=content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/slim/docbook45/block_paragraph.xml.slim asciidoctor-2.0.10/test/fixtures/custom-backends/slim/docbook45/block_paragraph.xml.slim --- asciidoctor-1.5.5/test/fixtures/custom-backends/slim/docbook45/block_paragraph.xml.slim 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/slim/docbook45/block_paragraph.xml.slim 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -- if title? 
- formalpara id=@id role=(attr :role) xreflabel=(attr :reftext) - title=title - para=content -- else - para id=@id role=(attr :role) xreflabel=(attr :reftext) =content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/slim/docbook5/block_paragraph.xml.slim asciidoctor-2.0.10/test/fixtures/custom-backends/slim/docbook5/block_paragraph.xml.slim --- asciidoctor-1.5.5/test/fixtures/custom-backends/slim/docbook5/block_paragraph.xml.slim 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/slim/docbook5/block_paragraph.xml.slim 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,6 @@ +- if title? + formalpara xml:id=@id role=(attr :role) xreflabel=(attr :reftext) + title=title + para=content +- else + para xml:id=@id role=(attr :role) xreflabel=(attr :reftext) =content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/block_paragraph.html.slim asciidoctor-2.0.10/test/fixtures/custom-backends/slim/html5/block_paragraph.html.slim --- asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/block_paragraph.html.slim 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/slim/html5/block_paragraph.html.slim 2019-08-18 16:11:54.000000000 +0000 @@ -1,3 +1,3 @@ - if title? .title=title -p id=@id class=(attr 'role') =content +p id=id class="#{role}" =content diff -Nru asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/block_sidebar.html.slim asciidoctor-2.0.10/test/fixtures/custom-backends/slim/html5/block_sidebar.html.slim --- asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/block_sidebar.html.slim 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/custom-backends/slim/html5/block_sidebar.html.slim 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ -aside id=@id class=(attr 'role') +aside id=id class="#{role}" - if title? header h1=title diff -Nru asciidoctor-1.5.5/test/fixtures/docinfo-footer.xml asciidoctor-2.0.10/test/fixtures/docinfo-footer.xml --- asciidoctor-1.5.5/test/fixtures/docinfo-footer.xml 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/docinfo-footer.xml 2019-08-18 16:11:54.000000000 +0000 @@ -1,4 +1,4 @@ - + Glossary term diff -Nru asciidoctor-1.5.5/test/fixtures/doctime-localtime.adoc asciidoctor-2.0.10/test/fixtures/doctime-localtime.adoc --- asciidoctor-1.5.5/test/fixtures/doctime-localtime.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/doctime-localtime.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,2 @@ +{doctime} +{localtime} Binary files /tmp/tmpbco7owks/P5VM7bOCiu/asciidoctor-1.5.5/test/fixtures/dot and /tmp/tmpbco7owks/CcGrvyMK7o/asciidoctor-2.0.10/test/fixtures/dot differ diff -Nru asciidoctor-1.5.5/test/fixtures/encoding.adoc asciidoctor-2.0.10/test/fixtures/encoding.adoc --- asciidoctor-1.5.5/test/fixtures/encoding.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/encoding.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,13 @@ +Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. + +https://github.com/foo-users/foo +へと `vicmd` キーマップを足してみている試み、 +アニメーションgifです。 + +tag::romé[] +Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. 
+end::romé[] + +== Überschrift + +* Codierungen sind verrückt auf älteren Versionen von Ruby diff -Nru asciidoctor-1.5.5/test/fixtures/encoding.asciidoc asciidoctor-2.0.10/test/fixtures/encoding.asciidoc --- asciidoctor-1.5.5/test/fixtures/encoding.asciidoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/encoding.asciidoc 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. - -https://github.com/foo-users/foo -へと `vicmd` キーマップを足してみている試み、 -アニメーションgifです。 - -tag::romé[] -Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. -end::romé[] - -== Überschrift - -* Codierungen sind verrückt auf älteren Versionen von Ruby diff -Nru asciidoctor-1.5.5/test/fixtures/file-with-missing-include.adoc asciidoctor-2.0.10/test/fixtures/file-with-missing-include.adoc --- asciidoctor-1.5.5/test/fixtures/file-with-missing-include.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/file-with-missing-include.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1 @@ +include::no-such-file.adoc[] Binary files /tmp/tmpbco7owks/P5VM7bOCiu/asciidoctor-1.5.5/test/fixtures/hello-asciidoctor.pdf and /tmp/tmpbco7owks/CcGrvyMK7o/asciidoctor-2.0.10/test/fixtures/hello-asciidoctor.pdf differ diff -Nru asciidoctor-1.5.5/test/fixtures/include-alt-extension.asciidoc asciidoctor-2.0.10/test/fixtures/include-alt-extension.asciidoc --- asciidoctor-1.5.5/test/fixtures/include-alt-extension.asciidoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/include-alt-extension.asciidoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ +first line + +ifdef::asciidoctor-version[Asciidoctor!] + +last line diff -Nru asciidoctor-1.5.5/test/fixtures/include-file.adoc asciidoctor-2.0.10/test/fixtures/include-file.adoc --- asciidoctor-1.5.5/test/fixtures/include-file.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/include-file.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,24 @@ +first line of included content +second line of included content +third line of included content +fourth line of included content +fifth line of included content +sixth line of included content +seventh line of included content +eighth line of included content + +// tag::snippet[] +// tag::snippetA[] +snippetA content +// end::snippetA[] + +non-tagged content + +// tag::snippetB[] +snippetB content +// end::snippetB[] +// end::snippet[] + +more non-tagged content + +last line of included content diff -Nru asciidoctor-1.5.5/test/fixtures/include-file.asciidoc asciidoctor-2.0.10/test/fixtures/include-file.asciidoc --- asciidoctor-1.5.5/test/fixtures/include-file.asciidoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/include-file.asciidoc 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -first line of included content -second line of included content -third line of included content -fourth line of included content -fifth line of included content -sixth line of included content -seventh line of included content -eighth line of included content - -// tag::snippet[] -// tag::snippetA[] -snippetA content -// end::snippetA[] - -non-tagged content - -// tag::snippetB[] -snippetB content -// end::snippetB[] -// end::snippet[] - -more non-tagged content - -last line of included content diff -Nru asciidoctor-1.5.5/test/fixtures/include-file.jsx asciidoctor-2.0.10/test/fixtures/include-file.jsx --- 
asciidoctor-1.5.5/test/fixtures/include-file.jsx 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/include-file.jsx 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,8 @@ +const element = ( +
    +

    Hello, Programmer!

    + +

    Welcome to the club.

    + +
    +) diff -Nru asciidoctor-1.5.5/test/fixtures/include-file.ml asciidoctor-2.0.10/test/fixtures/include-file.ml --- asciidoctor-1.5.5/test/fixtures/include-file.ml 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/include-file.ml 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,3 @@ +(* tag::snippet[] *) +let s = SS.empty;; +(* end::snippet[] *) diff -Nru asciidoctor-1.5.5/test/fixtures/lists.adoc asciidoctor-2.0.10/test/fixtures/lists.adoc --- asciidoctor-1.5.5/test/fixtures/lists.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/lists.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,96 @@ += Document Title +Doc Writer + +Preamble paragraph. + +NOTE: This is test, only a test. + +== Lists + +.Unordered, basic +* Edgar Allen Poe +* Sheri S. Tepper +* Bill Bryson + +.Unordered, max nesting +* level 1 +** level 2 +*** level 3 +**** level 4 +***** level 5 +* level 1 + +.Checklist +- [*] checked +- [x] also checked +- [ ] not checked +- normal list item + +.Ordered, basic +. Step 1 +. Step 2 +. Step 3 + +.Ordered, nested +. Step 1 +. Step 2 +.. Step 2a +.. Step 2b +. Step 3 + +.Ordered, max nesting +. level 1 +.. level 2 +... level 3 +.... level 4 +..... level 5 +. level 1 + +.Labeled, single-line +first term:: definition of first term +section term:: definition of second term + +.Labeled, multi-line +first term:: +definition of first term +second term:: +definition of second term + +.Q&A +[qanda] +What is Asciidoctor?:: + An implementation of the AsciiDoc processor in Ruby. +What is the answer to the Ultimate Question?:: 42 + +.Mixed +Operating Systems:: + Linux::: + . Fedora + * Desktop + . Ubuntu + * Desktop + * Server + BSD::: + . FreeBSD + . NetBSD + +Cloud Providers:: + PaaS::: + . OpenShift + . CloudBees + IaaS::: + . Amazon EC2 + . Rackspace + +.Unordered, complex +* level 1 +** level 2 +*** level 3 +This is a new line inside an unordered list using {plus} symbol. +We can even force content to start on a separate line... + +Amazing, isn't it? +**** level 4 ++ +The {plus} symbol is on a new line. + +***** level 5 diff -Nru asciidoctor-1.5.5/test/fixtures/mismatched-end-tag.adoc asciidoctor-2.0.10/test/fixtures/mismatched-end-tag.adoc --- asciidoctor-1.5.5/test/fixtures/mismatched-end-tag.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/mismatched-end-tag.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,7 @@ +//tag::a[] +a +//tag::b[] +b +//end::a[] +//end::b[] +c diff -Nru asciidoctor-1.5.5/test/fixtures/other-chapters.adoc asciidoctor-2.0.10/test/fixtures/other-chapters.adoc --- asciidoctor-1.5.5/test/fixtures/other-chapters.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/other-chapters.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,11 @@ +// tag::ch2[] +[#ch2] +== Chapter 2 + +The plot thickens. +// end::ch2[] + +[#ch3] +== Chapter 3 + +The plot runs its course, predictably. 
diff -Nru asciidoctor-1.5.5/test/fixtures/outer-include.adoc asciidoctor-2.0.10/test/fixtures/outer-include.adoc --- asciidoctor-1.5.5/test/fixtures/outer-include.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/outer-include.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ +first line of outer + +include::subdir/middle-include.adoc[] + +last line of outer diff -Nru asciidoctor-1.5.5/test/fixtures/parent-include-restricted.adoc asciidoctor-2.0.10/test/fixtures/parent-include-restricted.adoc --- asciidoctor-1.5.5/test/fixtures/parent-include-restricted.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/parent-include-restricted.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -1,5 +1,5 @@ first line of parent -include::child-include.adoc[depth=1] +include::child-include.adoc[depth=0] last line of parent diff -Nru asciidoctor-1.5.5/test/fixtures/sample.adoc asciidoctor-2.0.10/test/fixtures/sample.adoc --- asciidoctor-1.5.5/test/fixtures/sample.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/sample.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,30 @@ +Document Title +============== +Doc Writer +:idprefix: id_ + +Preamble paragraph. + +NOTE: This is test, only a test. + +== Section A + +*Section A* paragraph. + +=== Section A Subsection + +*Section A* 'subsection' paragraph. + +== Section B + +*Section B* paragraph. + +|=== +|a |b |c +|1 |2 |3 +|=== + +.Section B list +* Item 1 +* Item 2 +* Item 3 diff -Nru asciidoctor-1.5.5/test/fixtures/sample-alt-extension.asciidoc asciidoctor-2.0.10/test/fixtures/sample-alt-extension.asciidoc --- asciidoctor-1.5.5/test/fixtures/sample-alt-extension.asciidoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/sample-alt-extension.asciidoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,3 @@ += Document Title + +contents diff -Nru asciidoctor-1.5.5/test/fixtures/sample.asciidoc asciidoctor-2.0.10/test/fixtures/sample.asciidoc --- asciidoctor-1.5.5/test/fixtures/sample.asciidoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/sample.asciidoc 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -Document Title -============== -Doc Writer -:idprefix: id_ - -Preamble paragraph. - -NOTE: This is test, only a test. - -== Section A - -*Section A* paragraph. - -=== Section A Subsection - -*Section A* 'subsection' paragraph. - -== Section B - -*Section B* paragraph. - -.Section B list -* Item 1 -* Item 2 -* Item 3 - diff -Nru asciidoctor-1.5.5/test/fixtures/sample-docinfo-header.xml asciidoctor-2.0.10/test/fixtures/sample-docinfo-header.xml --- asciidoctor-1.5.5/test/fixtures/sample-docinfo-header.xml 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/sample-docinfo-header.xml 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,26 @@ + +{doctitle} +{docdate} + + +{firstname} +{lastname} + +{email} + +{authorinitials} + + +1.0 +2000-01-01 +jwz +New millennium, new release. + + +2.0 +2010-12-25 +why +Why not? A new release. 
+ + + diff -Nru asciidoctor-1.5.5/test/fixtures/section-a.adoc asciidoctor-2.0.10/test/fixtures/section-a.adoc --- asciidoctor-1.5.5/test/fixtures/section-a.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/section-a.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,4 @@ +[#section-a] +== Section A + +contents diff -Nru asciidoctor-1.5.5/test/fixtures/source-block.adoc asciidoctor-2.0.10/test/fixtures/source-block.adoc --- asciidoctor-1.5.5/test/fixtures/source-block.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/source-block.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,4 @@ +[source,ruby] +---- +puts 'Hello, World!' +---- diff -Nru asciidoctor-1.5.5/test/fixtures/subdir/index.adoc asciidoctor-2.0.10/test/fixtures/subdir/index.adoc --- asciidoctor-1.5.5/test/fixtures/subdir/index.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/subdir/index.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,3 @@ += Sample Document + +content diff -Nru asciidoctor-1.5.5/test/fixtures/subdir/inner-include.adoc asciidoctor-2.0.10/test/fixtures/subdir/inner-include.adoc --- asciidoctor-1.5.5/test/fixtures/subdir/inner-include.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/subdir/inner-include.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,3 @@ +first line of inner + +last line of inner diff -Nru asciidoctor-1.5.5/test/fixtures/subdir/middle-include.adoc asciidoctor-2.0.10/test/fixtures/subdir/middle-include.adoc --- asciidoctor-1.5.5/test/fixtures/subdir/middle-include.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/subdir/middle-include.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,5 @@ +first line of middle + +include::inner-include.adoc[] + +last line of middle diff -Nru asciidoctor-1.5.5/test/fixtures/subs.adoc asciidoctor-2.0.10/test/fixtures/subs.adoc --- asciidoctor-1.5.5/test/fixtures/subs.adoc 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/subs.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -4,4 +4,3 @@ :bootstrap-version: 3.2.0 Body content. 
- diff -Nru asciidoctor-1.5.5/test/fixtures/tagged-class-enclosed.rb asciidoctor-2.0.10/test/fixtures/tagged-class-enclosed.rb --- asciidoctor-1.5.5/test/fixtures/tagged-class-enclosed.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/tagged-class-enclosed.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,25 @@ +#tag::all[] +class Dog + #tag::init[] + def initialize breed + @breed = breed + end + #end::init[] + #tag::bark[] + + def bark + #tag::bark-beagle[] + if @breed == 'beagle' + 'woof woof woof woof woof' + #end::bark-beagle[] + #tag::bark-other[] + else + 'woof woof' + #end::bark-other[] + #tag::bark-all[] + end + #end::bark-all[] + end + #end::bark[] +end +#end::all[] diff -Nru asciidoctor-1.5.5/test/fixtures/tagged-class.rb asciidoctor-2.0.10/test/fixtures/tagged-class.rb --- asciidoctor-1.5.5/test/fixtures/tagged-class.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/tagged-class.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,23 @@ +class Dog + #tag::init[] + def initialize breed + @breed = breed + end + #end::init[] + #tag::bark[] + + def bark + #tag::bark-beagle[] + if @breed == 'beagle' + 'woof woof woof woof woof' + #end::bark-beagle[] + #tag::bark-other[] + else + 'woof woof' + #end::bark-other[] + #tag::bark-all[] + end + #end::bark-all[] + end + #end::bark[] +end diff -Nru asciidoctor-1.5.5/test/fixtures/unclosed-tag.adoc asciidoctor-2.0.10/test/fixtures/unclosed-tag.adoc --- asciidoctor-1.5.5/test/fixtures/unclosed-tag.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/unclosed-tag.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,3 @@ +x +// tag::a[] +a diff -Nru asciidoctor-1.5.5/test/fixtures/undef-dir-home.rb asciidoctor-2.0.10/test/fixtures/undef-dir-home.rb --- asciidoctor-1.5.5/test/fixtures/undef-dir-home.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/undef-dir-home.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,2 @@ +# undef_method wasn't public until 2.5 +Dir.singleton_class.send :undef_method, :home diff -Nru asciidoctor-1.5.5/test/fixtures/unexpected-end-tag.adoc asciidoctor-2.0.10/test/fixtures/unexpected-end-tag.adoc --- asciidoctor-1.5.5/test/fixtures/unexpected-end-tag.adoc 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/fixtures/unexpected-end-tag.adoc 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,4 @@ +// tag::a[] +a +// end::a[] +// end::a[] diff -Nru asciidoctor-1.5.5/test/helpers_test.rb asciidoctor-2.0.10/test/helpers_test.rb --- asciidoctor-1.5.5/test/helpers_test.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/helpers_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,131 @@ +# frozen_string_literal: true +require_relative 'test_helper' + +context 'Helpers' do + context 'URI Encoding' do + test 'should URI encode non-word characters generally' do + given = ' !*/%&?\\=' + expect = '+%21%2A%2F%25%26%3F%5C%3D' + assert_equal expect, (Asciidoctor::Helpers.encode_uri_component given) + end + + test 'should not URI encode select non-word characters' do + # NOTE Ruby 2.5 and up stopped encoding ~ + given = '-.' 
+ expect = given + assert_equal expect, (Asciidoctor::Helpers.encode_uri_component given) + end + end + + context 'URIs and Paths' do + test 'rootname should return file name without extension' do + assert_equal 'master', Asciidoctor::Helpers.rootname('master.adoc') + assert_equal 'docs/master', Asciidoctor::Helpers.rootname('docs/master.adoc') + end + + test 'rootname should file name if it has no extension' do + assert_equal 'master', Asciidoctor::Helpers.rootname('master') + assert_equal 'docs/master', Asciidoctor::Helpers.rootname('docs/master') + end + + test 'rootname should ignore dot not in last segment' do + assert_equal 'include.d/master', Asciidoctor::Helpers.rootname('include.d/master') + assert_equal 'include.d/master', Asciidoctor::Helpers.rootname('include.d/master.adoc') + end + + test 'extname? should return whether path contains an extname' do + assert Asciidoctor::Helpers.extname?('document.adoc') + assert Asciidoctor::Helpers.extname?('path/to/document.adoc') + assert_nil Asciidoctor::Helpers.extname?('basename') + refute Asciidoctor::Helpers.extname?('include.d/basename') + end + + test 'UriSniffRx should detect URIs' do + assert Asciidoctor::UriSniffRx =~ 'http://example.com' + assert Asciidoctor::UriSniffRx =~ 'https://example.com' + assert Asciidoctor::UriSniffRx =~ 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=' + end + + test 'UriSniffRx should not detect an absolute Windows path as a URI' do + assert Asciidoctor::UriSniffRx !~ 'c:/sample.adoc' + assert Asciidoctor::UriSniffRx !~ 'c:\\sample.adoc' + end + end + + context 'Type Resolution' do + test 'should get class for top-level class name' do + clazz = Asciidoctor::Helpers.class_for_name 'String' + refute_nil clazz + assert_equal String, clazz + end + + test 'should get class for class name in module' do + clazz = Asciidoctor::Helpers.class_for_name 'Asciidoctor::Document' + refute_nil clazz + assert_equal Asciidoctor::Document, clazz + end + + test 'should get class for class name resolved from root' do + clazz = Asciidoctor::Helpers.class_for_name '::Asciidoctor::Document' + refute_nil clazz + assert_equal Asciidoctor::Document, clazz + end + + test 'should raise exception if cannot find class for name' do + begin + Asciidoctor::Helpers.class_for_name 'InvalidModule::InvalidClass' + flunk 'Expecting RuntimeError to be raised' + rescue NameError => e + assert_equal 'Could not resolve class for name: InvalidModule::InvalidClass', e.message + end + end + + test 'should raise exception if constant name is invalid' do + begin + Asciidoctor::Helpers.class_for_name 'foobar' + flunk 'Expecting RuntimeError to be raised' + rescue NameError => e + assert_equal 'Could not resolve class for name: foobar', e.message + end + end + + test 'should raise exception if class not found in scope' do + begin + Asciidoctor::Helpers.class_for_name 'Asciidoctor::Extensions::String' + flunk 'Expecting RuntimeError to be raised' + rescue NameError => e + assert_equal 'Could not resolve class for name: Asciidoctor::Extensions::String', e.message + end + end + + test 'should raise exception if name resolves to module' do + begin + Asciidoctor::Helpers.class_for_name 'Asciidoctor::Extensions' + flunk 'Expecting RuntimeError to be raised' + rescue NameError => e + assert_equal 'Could not resolve class for name: Asciidoctor::Extensions', e.message + end + end + + test 'should resolve class if class is given' do + clazz = Asciidoctor::Helpers.resolve_class Asciidoctor::Document + refute_nil clazz + assert_equal 
Asciidoctor::Document, clazz + end + + test 'should resolve class if class from string' do + clazz = Asciidoctor::Helpers.resolve_class 'Asciidoctor::Document' + refute_nil clazz + assert_equal Asciidoctor::Document, clazz + end + + test 'should not resolve class if not in scope' do + begin + Asciidoctor::Helpers.resolve_class 'Asciidoctor::Extensions::String' + flunk 'Expecting RuntimeError to be raised' + rescue NameError => e + assert_equal 'Could not resolve class for name: Asciidoctor::Extensions::String', e.message + end + end + end +end diff -Nru asciidoctor-1.5.5/test/invoker_test.rb asciidoctor-2.0.10/test/invoker_test.rb --- asciidoctor-1.5.5/test/invoker_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/invoker_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,22 +1,18 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end -require 'asciidoctor/cli/options' -require 'asciidoctor/cli/invoker' +# frozen_string_literal: false +require_relative 'test_helper' +require File.join Asciidoctor::LIB_DIR, 'asciidoctor/cli' context 'Invoker' do - test 'should parse source and render as html5 article by default' do + test 'should parse source and convert to html5 article by default' do invoker = nil output = nil redirect_streams do |out, err| invoker = invoke_cli %w(-o -) output = out.string end - assert !invoker.nil? + refute_nil invoker doc = invoker.document - assert !doc.nil? + refute_nil doc assert_equal 'Document Title', doc.doctitle assert_equal 'Doc Writer', doc.attr('author') assert_equal 'html5', doc.attr('backend') @@ -24,7 +20,7 @@ assert_equal 'article', doc.attr('doctype') assert doc.blocks? assert_equal :preamble, doc.blocks.first.context - assert !output.empty? + refute_empty output assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 @@ -33,64 +29,68 @@ end test 'should set implicit doc info attributes' do - sample_filepath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.asciidoc')) - sample_filedir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures')) + sample_filepath = fixture_path 'sample.adoc' + sample_filedir = fixturedir invoker = invoke_cli_to_buffer %w(-o /dev/null), sample_filepath doc = invoker.document assert_equal 'sample', doc.attr('docname') assert_equal sample_filepath, doc.attr('docfile') assert_equal sample_filedir, doc.attr('docdir') assert doc.attr?('docdate') + assert doc.attr?('docyear') assert doc.attr?('doctime') assert doc.attr?('docdatetime') - assert invoker.read_output.empty? 
+ assert_empty invoker.read_output end test 'should allow docdate and doctime to be overridden' do - sample_filepath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.asciidoc')) - invoker = invoke_cli_to_buffer %w(-o /dev/null -a docdate=2015-01-01 -a doctime=10:00:00-07:00), sample_filepath + sample_filepath = fixture_path 'sample.adoc' + invoker = invoke_cli_to_buffer %w(-o /dev/null -a docdate=2015-01-01 -a doctime=10:00:00-0700), sample_filepath doc = invoker.document assert doc.attr?('docdate', '2015-01-01') - assert doc.attr?('doctime', '10:00:00-07:00') - assert doc.attr?('docdatetime', '2015-01-01 10:00:00-07:00') + assert doc.attr?('docyear', '2015') + assert doc.attr?('doctime', '10:00:00-0700') + assert doc.attr?('docdatetime', '2015-01-01 10:00:00-0700') end test 'should accept document from stdin and write to stdout' do invoker = invoke_cli_to_buffer(%w(-s), '-') { 'content' } doc = invoker.document - assert !doc.attr?('docname') - assert !doc.attr?('docfile') + refute doc.attr?('docname') + refute doc.attr?('docfile') assert_equal Dir.pwd, doc.attr('docdir') assert_equal doc.attr('docdate'), doc.attr('localdate') + assert_equal doc.attr('docyear'), doc.attr('localyear') assert_equal doc.attr('doctime'), doc.attr('localtime') assert_equal doc.attr('docdatetime'), doc.attr('localdatetime') - assert !doc.attr?('outfile') + refute doc.attr?('outfile') output = invoker.read_output - assert !output.empty? + refute_empty output assert_xpath '/*[@class="paragraph"]/p[text()="content"]', output, 1 end test 'should not fail to rewind input if reading document from stdin' do - io = STDIN.dup - class << io - def readlines - ['paragraph'] - end + begin + old_stdin = $stdin + $stdin = StringIO.new 'paragraph' + invoker = invoke_cli_to_buffer(%w(-s), '-') + assert_equal 0, invoker.code + assert_equal 1, invoker.document.blocks.size + ensure + $stdin = old_stdin end - invoker = invoke_cli_to_buffer(%w(-s), '-') { io } - assert_equal 0, invoker.code - assert_equal 1, invoker.document.blocks.size end test 'should accept document from stdin and write to output file' do - sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) + sample_outpath = fixture_path 'sample-output.html' begin invoker = invoke_cli(%W(-s -o #{sample_outpath}), '-') { 'content' } doc = invoker.document - assert !doc.attr?('docname') - assert !doc.attr?('docfile') + refute doc.attr?('docname') + refute doc.attr?('docfile') assert_equal Dir.pwd, doc.attr('docdir') assert_equal doc.attr('docdate'), doc.attr('localdate') + assert_equal doc.attr('docyear'), doc.attr('localyear') assert_equal doc.attr('doctime'), doc.attr('localtime') assert_equal doc.attr('docdatetime'), doc.attr('localdatetime') assert doc.attr?('outfile') @@ -101,8 +101,35 @@ end end + test 'should fail if input file matches resolved output file' do + invoker = invoke_cli_to_buffer %W(-a outfilesuffix=.adoc), 'sample.adoc' + assert_match(/input file and output file cannot be the same/, invoker.read_error) + end + + test 'should fail if input file matches specified output file' do + sample_outpath = fixture_path 'sample.adoc' + invoker = invoke_cli_to_buffer %W(-o #{sample_outpath}), 'sample.adoc' + assert_match(/input file and output file cannot be the same/, invoker.read_error) + end + + test 'should accept input from named pipe and output to stdout' do + sample_inpath = fixture_path 'sample-pipe.adoc' + begin + %x(mkfifo #{sample_inpath}) + write_thread = Thread.new do + File.write 
sample_inpath, 'pipe content' + end + invoker = invoke_cli_to_buffer %w(-a stylesheet!), sample_inpath + result = invoker.read_output + assert_match(/pipe content/, result) + write_thread.join + ensure + FileUtils.rm_f sample_inpath + end + end unless windows? + test 'should allow docdir to be specified when input is a string' do - expected_docdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures')) + expected_docdir = fixturedir invoker = invoke_cli_to_buffer(%w(-s --base-dir test/fixtures -o /dev/null), '-') { 'content' } doc = invoker.document assert_equal expected_docdir, doc.attr('docdir') @@ -110,7 +137,7 @@ end test 'should display version and exit' do - expected = %(Asciidoctor #{Asciidoctor::VERSION} [http://asciidoctor.org]\nRuntime Environment (#{RUBY_DESCRIPTION})) + expected = %(Asciidoctor #{Asciidoctor::VERSION} [https://asciidoctor.org]\nRuntime Environment (#{RUBY_DESCRIPTION})) ['--version', '-V'].each do |switch| actual = nil redirect_streams do |out, err| @@ -123,9 +150,9 @@ end test 'should print warnings to stderr by default' do - input = <<-EOS -2. second -3. third + input = <<~'EOS' + 2. second + 3. third EOS warnings = nil redirect_streams do |out, err| @@ -135,10 +162,25 @@ assert_match(/WARNING/, warnings) end + test 'should enable script warnings if -w flag is specified' do + old_verbose, $VERBOSE = $VERBOSE, false + begin + warnings = nil + redirect_streams do |out, err| + invoke_cli_to_buffer(%w(-w -o /dev/null), '-') { $NO_SUCH_VARIABLE || 'text' } + warnings = err.string + end + assert_equal false, $VERBOSE + refute_empty warnings + rescue + $VERBOSE = old_verbose + end + end + test 'should silence warnings if -q flag is specified' do - input = <<-EOS -2. second -3. third + input = <<~'EOS' + 2. second + 3. third EOS warnings = nil redirect_streams do |out, err| @@ -148,6 +190,37 @@ assert_equal '', warnings end + test 'should not fail to check log level when -q flag is specified' do + input = <<~'EOS' + skip to <> + + . download + . install[[install]] + . run + EOS + begin + old_stderr, $stderr = $stderr, ::StringIO.new + old_stdout, $stdout = $stdout, ::StringIO.new + invoker = invoke_cli(%w(-q), '-') { input } + assert_equal 0, invoker.code + ensure + $stderr = old_stderr + $stdout = old_stdout + end + end + + test 'should return non-zero exit code if failure level is reached' do + input = <<~'EOS' + 2. second + 3. third + EOS + exit_code, messages = redirect_streams do |_, err| + [invoke_cli(%w(-q --failure-level=WARN -o /dev/null), '-') { input }.code, err.string] + end + assert_equal 1, exit_code + assert messages.empty? 
+ end + test 'should report usage if no input file given' do redirect_streams do |out, err| invoke_cli [], nil @@ -157,29 +230,29 @@ test 'should report error if input file does not exist' do redirect_streams do |out, err| - invoker = invoke_cli [], 'missing_file.asciidoc' - assert_match(/input file .* missing or cannot be read/, err.string) + invoker = invoke_cli [], 'missing_file.adoc' + assert_match(/input file .* is missing/, err.string) assert_equal 1, invoker.code end end test 'should treat extra arguments as files' do redirect_streams do |out, err| - invoker = invoke_cli %w(-o /dev/null extra arguments sample.asciidoc), nil - assert_match(/input file .* missing or cannot be read/, err.string) + invoker = invoke_cli %w(-o /dev/null extra arguments sample.adoc), nil + assert_match(/input file .* is missing/, err.string) assert_equal 1, invoker.code end end test 'should output to file name based on input file name' do - sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.html')) + sample_outpath = fixture_path 'sample.html' begin invoker = invoke_cli doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) - output = File.read(sample_outpath) - assert !output.empty? + output = File.read(sample_outpath, mode: Asciidoctor::FILE_READ_MODE) + refute_empty output assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 @@ -191,8 +264,8 @@ end test 'should output to file in destination directory if set' do - destination_path = File.expand_path(File.join(File.dirname(__FILE__), 'test_output')) - sample_outpath = File.join(destination_path, 'sample.html') + destination_path = File.join testdir, 'test_output' + sample_outpath = File.join destination_path, 'sample.html' begin FileUtils.mkdir_p(destination_path) # QUESTION should -D be relative to working directory or source directory? 
@@ -207,8 +280,25 @@ end end + test 'should preserve directory structure in destination directory if source directory is set' do + sample_inpath = 'subdir/index.adoc' + destination_path = 'test_output' + destination_subdir_path = File.join destination_path, 'subdir' + sample_outpath = File.join destination_subdir_path, 'index.html' + begin + FileUtils.mkdir_p(destination_path) + invoke_cli %W(-D #{destination_path} -R test/fixtures), sample_inpath + assert File.directory?(destination_subdir_path) + assert File.exist?(sample_outpath) + ensure + FileUtils.rm_f(sample_outpath) + FileUtils.rmdir(destination_subdir_path) + FileUtils.rmdir(destination_path) + end + end + test 'should output to file specified' do - sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) + sample_outpath = fixture_path 'sample-output.html' begin invoker = invoke_cli %W(-o #{sample_outpath}) doc = invoker.document @@ -220,12 +310,11 @@ end test 'should copy default stylesheet to target directory if linkcss is specified' do - sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) - asciidoctor_stylesheet = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'asciidoctor.css')) - coderay_stylesheet = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'coderay-asciidoctor.css')) + sample_outpath = fixture_path 'sample-output.html' + asciidoctor_stylesheet = fixture_path 'asciidoctor.css' + coderay_stylesheet = fixture_path 'coderay-asciidoctor.css' begin - invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a source-highlighter=coderay) - invoker.document + invoke_cli %W(-o #{sample_outpath} -a linkcss -a source-highlighter=coderay), 'source-block.adoc' assert File.exist?(sample_outpath) assert File.exist?(asciidoctor_stylesheet) assert File.exist?(coderay_stylesheet) @@ -236,14 +325,30 @@ end end + test 'should not copy coderay stylesheet to target directory when no source blocks where highlighted' do + sample_outpath = fixture_path 'sample-output.html' + asciidoctor_stylesheet = fixture_path 'asciidoctor.css' + coderay_stylesheet = fixture_path 'coderay-asciidoctor.css' + begin + invoke_cli %W(-o #{sample_outpath} -a linkcss -a source-highlighter=coderay) + assert File.exist?(sample_outpath) + assert File.exist?(asciidoctor_stylesheet) + refute File.exist?(coderay_stylesheet) + ensure + FileUtils.rm_f(sample_outpath) + FileUtils.rm_f(asciidoctor_stylesheet) + FileUtils.rm_f(coderay_stylesheet) + end + end + test 'should not copy default stylesheet to target directory if linkcss is set and copycss is unset' do - sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) - default_stylesheet = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'asciidoctor.css')) + sample_outpath = fixture_path 'sample-output.html' + default_stylesheet = fixture_path 'asciidoctor.css' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a copycss!) 
invoker.document assert File.exist?(sample_outpath) - assert !File.exist?(default_stylesheet) + refute File.exist?(default_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(default_stylesheet) @@ -251,7 +356,7 @@ end test 'should copy custom stylesheet to target directory if stylesheet and linkcss is specified' do - destdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'output')) + destdir = fixture_path 'output' sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'styles' custom_stylesheet = File.join stylesdir, 'custom.css' @@ -269,7 +374,7 @@ end test 'should not copy custom stylesheet to target directory if stylesheet and linkcss are set and copycss is unset' do - destdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'output')) + destdir = fixture_path 'output' sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'styles' custom_stylesheet = File.join stylesdir, 'custom.css' @@ -277,7 +382,7 @@ invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a stylesdir=./styles -a stylesheet=custom.css -a copycss!) invoker.document assert File.exist?(sample_outpath) - assert !File.exist?(custom_stylesheet) + refute File.exist?(custom_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(custom_stylesheet) @@ -287,14 +392,14 @@ end test 'should not copy custom stylesheet to target directory if stylesdir is a URI' do - destdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'output')) + destdir = fixture_path 'output' sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'http:' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a stylesdir=http://example.org/styles -a stylesheet=custom.css) invoker.document assert File.exist?(sample_outpath) - assert !File.exist?(stylesdir) + refute File.exist?(stylesdir) ensure FileUtils.rm_f(sample_outpath) FileUtils.rmdir(stylesdir) if File.directory? 
stylesdir @@ -302,11 +407,11 @@ end end - test 'should render all passed files' do - basic_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'basic.html')) - sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.html')) + test 'should convert all passed files' do + basic_outpath = fixture_path 'basic.html' + sample_outpath = fixture_path 'sample.html' begin - invoke_cli_with_filenames [], %w(basic.asciidoc sample.asciidoc) + invoke_cli_with_filenames [], %w(basic.adoc sample.adoc) assert File.exist?(basic_outpath) assert File.exist?(sample_outpath) ensure @@ -316,11 +421,11 @@ end test 'options should not be modified when processing multiple files' do - destination_path = File.expand_path(File.join(File.dirname(__FILE__), 'test_output')) - basic_outpath = File.join(destination_path, 'basic.htm') - sample_outpath = File.join(destination_path, 'sample.htm') + destination_path = File.join testdir, 'test_output' + basic_outpath = File.join destination_path, 'basic.htm' + sample_outpath = File.join destination_path, 'sample.htm' begin - invoke_cli_with_filenames %w(-D test/test_output -a outfilesuffix=.htm), %w(basic.asciidoc sample.asciidoc) + invoke_cli_with_filenames %w(-D test/test_output -a outfilesuffix=.htm), %w(basic.adoc sample.adoc) assert File.exist?(basic_outpath) assert File.exist?(sample_outpath) ensure @@ -330,21 +435,21 @@ end end - test 'should render all files that matches a glob expression' do - basic_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'basic.html')) + test 'should convert all files that matches a glob expression' do + basic_outpath = fixture_path 'basic.html' begin - invoke_cli_to_buffer [], "ba*.asciidoc" + invoke_cli_to_buffer [], "ba*.adoc" assert File.exist?(basic_outpath) ensure FileUtils.rm_f(basic_outpath) end end - test 'should render all files that matches an absolute path glob expression' do - basic_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'basic.html')) - glob = File.join(File.dirname(__FILE__), 'fixtures', 'ba*.asciidoc') + test 'should convert all files that matches an absolute path glob expression' do + basic_outpath = fixture_path 'basic.html' + glob = fixture_path 'ba*.adoc' # test Windows using backslash-style pathname - if ::File::ALT_SEPARATOR == '\\' + if File::ALT_SEPARATOR == '\\' glob = glob.tr '/', '\\' end @@ -357,21 +462,55 @@ end test 'should suppress header footer if specified' do - invoker = invoke_cli_to_buffer %w(-s -o -) - output = invoker.read_output - assert_xpath '/html', output, 0 - assert_xpath '/*[@id="preamble"]', output, 1 + [%w(-s -o -), %w(-e -o -)].each do |flags| + invoker = invoke_cli_to_buffer flags + output = invoker.read_output + assert_xpath '/html', output, 0 + assert_xpath '/*[@id="preamble"]', output, 1 + end + end + + test 'should write page for each alternate manname' do + outdir = fixturedir + outfile_1 = File.join outdir, 'eve.1' + outfile_2 = File.join outdir, 'islifeform.1' + input = <<~'EOS' + = eve(1) + Andrew Stanton + v1.0.0 + :doctype: manpage + :manmanual: EVE + :mansource: EVE + + == NAME + + eve, islifeform - analyzes an image to determine if it's a picture of a life form + + == SYNOPSIS + + *eve* ['OPTION']... 'FILE'... 
+ EOS + + begin + invoke_cli(%W(-b manpage -o #{outfile_1}), '-') { input } + assert File.exist?(outfile_1) + assert File.exist?(outfile_2) + assert_equal '.so eve.1', (File.read outfile_2, mode: Asciidoctor::FILE_READ_MODE).chomp + ensure + FileUtils.rm_f outfile_1 + FileUtils.rm_f outfile_2 + end end - test 'should output a trailing endline to stdout' do + test 'should output a trailing newline to stdout' do invoker = nil output = nil redirect_streams do |out, err| invoker = invoke_cli %w(-o -) output = out.string end - assert !invoker.nil? - assert !output.nil? + refute_nil invoker + refute_nil output assert output.end_with?("\n") end @@ -384,10 +523,10 @@ assert_xpath '/html', output, 1 end - test 'should set backend to docbook45 if specified' do - invoker = invoke_cli_to_buffer %w(-b docbook45 -a xmlns -o -) + test 'should set backend to docbook5 if specified' do + invoker = invoke_cli_to_buffer %w(-b docbook5 -a xmlns -o -) doc = invoker.document - assert_equal 'docbook45', doc.attr('backend') + assert_equal 'docbook5', doc.attr('backend') assert_equal '.xml', doc.attr('outfilesuffix') output = invoker.read_output assert_xpath '/xmlns:article', output, 1 @@ -409,19 +548,45 @@ assert_xpath '/html/body[@class="book"]', output, 1 end + test 'should warn if doctype is inline and the first block is not a candidate for inline conversion' do + ['== Section Title', 'image::tiger.png[]'].each do |input| + warnings = redirect_streams do |out, err| + invoke_cli_to_buffer(%w(-d inline), '-') { input } + err.string + end + assert_match(/WARNING: no inline candidate/, warnings) + end + end + + test 'should not warn if doctype is inline and the document has no blocks' do + warnings = redirect_streams do |out, err| + invoke_cli_to_buffer(%w(-d inline), '-') { '// comment' } + err.string + end + refute_match(/WARNING/, warnings) + end + + test 'should not warn if doctype is inline and the document contains multiple blocks' do + warnings = redirect_streams do |out, err| + invoke_cli_to_buffer(%w(-d inline), '-') { %(paragraph one\n\nparagraph two\n\nparagraph three) } + err.string + end + refute_match(/WARNING/, warnings) + end + test 'should locate custom templates based on template dir, template engine and backend' do - custom_backend_root = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends')) + custom_backend_root = fixture_path 'custom-backends' invoker = invoke_cli_to_buffer %W(-E haml -T #{custom_backend_root} -o -) doc = invoker.document - assert doc.converter.is_a? Asciidoctor::Converter::CompositeConverter + assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter 'paragraph' - assert selected.is_a? Asciidoctor::Converter::TemplateConverter - assert selected.templates['paragraph'].is_a? 
Tilt::HamlTemplate + assert_kind_of Asciidoctor::Converter::TemplateConverter, selected + assert_kind_of Tilt::HamlTemplate, selected.templates['paragraph'] end test 'should load custom templates from multiple template directories' do - custom_backend_1 = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends/haml/html5')) - custom_backend_2 = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends/haml/html5-tweaks')) + custom_backend_1 = fixture_path 'custom-backends/haml/html5' + custom_backend_2 = fixture_path 'custom-backends/haml/html5-tweaks' invoker = invoke_cli_to_buffer %W(-T #{custom_backend_1} -T #{custom_backend_2} -o - -s) output = invoker.read_output assert_css '.paragraph', output, 0 @@ -472,7 +637,7 @@ test 'should unset attribute ending in bang' do invoker = invoke_cli_to_buffer %w(-a sectids! -s -o -) doc = invoker.document - assert !doc.attr?('sectids') + refute doc.attr?('sectids') output = invoker.read_output # leave the count loose in case we add more sections assert_xpath '//h2[not(@id)]', output @@ -511,58 +676,96 @@ end test 'should force default external encoding to UTF-8' do - executable = File.expand_path(File.join(File.dirname(__FILE__), '..', 'bin', 'asciidoctor')) - input_path = fixture_path 'encoding.asciidoc' - old_lang = ENV['LANG'] - ENV['LANG'] = 'US-ASCII' - begin - # using open3 to work around a bug in JRuby process_manager.rb, - # which tries to run a gsub on stdout prematurely breaking the test - require 'open3' - #cmd = "#{executable} -o - --trace #{input_path}" - cmd = "#{File.join RbConfig::CONFIG['bindir'], RbConfig::CONFIG['ruby_install_name']} #{executable} -o - --trace #{input_path}" - _, out, _ = Open3.popen3 cmd - #stderr_lines = stderr.readlines - # warnings may be issued, so don't assert on stderr - #assert stderr_lines.empty?, 'Command failed. Expected to receive a rendered document.' - stdout_lines = out.readlines - assert !stdout_lines.empty? - stdout_lines.each {|l| l.force_encoding Encoding::UTF_8 } if Asciidoctor::FORCE_ENCODING - stdout_str = stdout_lines.join - assert stdout_str.include?('Codierungen sind verrückt auf älteren Versionen von Ruby') - ensure - ENV['LANG'] = old_lang - end + input_path = fixture_path 'encoding.adoc' + # using open3 to work around a bug in JRuby process_manager.rb, + # which tries to run a gsub on stdout prematurely breaking the test + # warnings may be issued, so don't assert on stderr + stdout_lines = run_command({ 'LANG' => 'US-ASCII' }, %(#{asciidoctor_cmd} -o - --trace #{input_path})) {|out| out.readlines } + refute_empty stdout_lines + # NOTE Ruby on Windows runs with a IBM437 encoding by default + stdout_lines.each {|l| l.force_encoding Encoding::UTF_8 } unless Encoding.default_external == Encoding::UTF_8 + stdout_str = stdout_lines.join + assert_includes stdout_str, 'Codierungen sind verrückt auf älteren Versionen von Ruby' + end + + test 'should force stdio encoding to UTF-8' do + result = run_command(%(#{asciidoctor_cmd true, '-E IBM866:IBM866'} -r #{fixture_path 'configure-stdin.rb'} -s -o - -)) {|out| out.read } + # NOTE Ruby on Windows runs with a IBM437 encoding by default + result.force_encoding Encoding::UTF_8 unless Encoding.default_external == Encoding::UTF_8 + assert_equal Encoding::UTF_8, result.encoding + assert_include '

<p>é</p>', result + assert_include '<p>IBM866:IBM866</p>
    ', result + end + + test 'should not fail to load if call to Dir.home fails' do + rubyopt = %(-r #{fixture_path 'undef-dir-home.rb'}) + result = run_command(%(#{asciidoctor_cmd true, rubyopt} -s -o - #{fixture_path 'basic.adoc'})) {|out| out.read } + assert_include 'Body content', result end test 'should print timings when -t flag is specified' do - input = <<-EOS - Sample *AsciiDoc* - EOS + input = 'Sample *AsciiDoc*' invoker = nil error = nil - redirect_streams do |out, err| + redirect_streams do |_, err| invoker = invoke_cli(%w(-t -o /dev/null), '-') { input } error = err.string end - assert !invoker.nil? - assert !error.nil? + refute_nil invoker + refute_nil error assert_match(/Total time/, error) end + test 'should show timezone as UTC if system TZ is set to UTC' do + input_path = fixture_path 'doctime-localtime.adoc' + output = run_command({ 'TZ' => 'UTC', 'SOURCE_DATE_EPOCH' => nil }, %(#{asciidoctor_cmd} -d inline -o - -s #{input_path})) {|out| out.read } + doctime, localtime = output.lines.map(&:chomp) + assert doctime.end_with?(' UTC') + assert localtime.end_with?(' UTC') + end + + test 'should show timezone as offset if system TZ is not set to UTC' do + input_path = fixture_path 'doctime-localtime.adoc' + output = run_command({ 'TZ' => 'EST+5', 'SOURCE_DATE_EPOCH' => nil }, %(#{asciidoctor_cmd} -d inline -o - -s #{input_path})) {|out| out.read } + doctime, localtime = output.lines.map(&:chomp) + assert doctime.end_with?(' -0500') + assert localtime.end_with?(' -0500') + end + test 'should use SOURCE_DATE_EPOCH as modified time of input file and local time' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = '1234123412' - sample_filepath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.asciidoc')) + sample_filepath = fixture_path 'sample.adoc' invoker = invoke_cli_to_buffer %w(-o /dev/null), sample_filepath doc = invoker.document assert_equal '2009-02-08', (doc.attr 'docdate') - assert_match(/2009-02-08 20:03:32 (GMT|UTC)/, (doc.attr 'docdatetime')) + assert_equal '2009', (doc.attr 'docyear') + assert_match(/2009-02-08 20:03:32 UTC/, (doc.attr 'docdatetime')) assert_equal '2009-02-08', (doc.attr 'localdate') - assert_match(/2009-02-08 20:03:32 (GMT|UTC)/, (doc.attr 'localdatetime')) + assert_equal '2009', (doc.attr 'localyear') + assert_match(/2009-02-08 20:03:32 UTC/, (doc.attr 'localdatetime')) ensure - ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch if old_source_date_epoch + if old_source_date_epoch + ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch + else + ENV.delete 'SOURCE_DATE_EPOCH' + end + end + end + + test 'should fail if SOURCE_DATE_EPOCH is malformed' do + old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' + begin + ENV['SOURCE_DATE_EPOCH'] = 'aaaaaaaa' + sample_filepath = fixture_path 'sample.adoc' + assert_equal 1, (invoke_cli_to_buffer %w(-o /dev/null), sample_filepath).code + ensure + if old_source_date_epoch + ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch + else + ENV.delete 'SOURCE_DATE_EPOCH' + end end end end diff -Nru asciidoctor-1.5.5/test/links_test.rb asciidoctor-2.0.10/test/links_test.rb --- asciidoctor-1.5.5/test/links_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/links_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,347 +1,990 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! 
- require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Links' do test 'qualified url inline with text' do - assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'http://asciidoc.org']", render_string("The AsciiDoc project is located at http://asciidoc.org.") + assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'http://asciidoc.org']", convert_string("The AsciiDoc project is located at http://asciidoc.org.") + end + + test 'qualified url with role inline with text' do + assert_xpath "//a[@href='http://asciidoc.org'][@class='bare project'][text() = 'http://asciidoc.org']", convert_string("The AsciiDoc project is located at http://asciidoc.org[role=project].") end test 'qualified http url inline with hide-uri-scheme set' do - assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'asciidoc.org']", render_string("The AsciiDoc project is located at http://asciidoc.org.", :attributes => {'hide-uri-scheme' => ''}) + assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'asciidoc.org']", convert_string("The AsciiDoc project is located at http://asciidoc.org.", attributes: { 'hide-uri-scheme' => '' }) end test 'qualified file url inline with label' do - assert_xpath "//a[@href='file:///home/user/bookmarks.html'][text() = 'My Bookmarks']", render_embedded_string('file:///home/user/bookmarks.html[My Bookmarks]') + assert_xpath "//a[@href='file:///home/user/bookmarks.html'][text() = 'My Bookmarks']", convert_string_to_embedded('file:///home/user/bookmarks.html[My Bookmarks]') end test 'qualified file url inline with hide-uri-scheme set' do - assert_xpath "//a[@href='file:///etc/app.conf'][text() = '/etc/app.conf']", render_string('Edit the configuration file link:file:///etc/app.conf[]', :attributes => {'hide-uri-scheme' => ''}) + assert_xpath "//a[@href='file:///etc/app.conf'][text() = '/etc/app.conf']", convert_string('Edit the configuration file link:file:///etc/app.conf[]', attributes: { 'hide-uri-scheme' => '' }) + end + + test 'should not hide bare URI scheme in implicit text of link macro when hide-uri-scheme is set' do + { + 'link:https://[]' => 'https://', + 'link:ssh://[]' => 'ssh://', + }.each do |input, expected| + assert_xpath %(/a[text() = "#{expected}"]), (convert_inline_string input, attributes: { 'hide-uri-scheme' => '' }) + end end test 'qualified url with label' do - assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", render_string("We're parsing http://asciidoc.org[AsciiDoc] markup") + assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", convert_string("We're parsing http://asciidoc.org[AsciiDoc] markup") end test 'qualified url with label containing escaped right square bracket' do - assert_xpath "//a[@href='http://asciidoc.org'][text() = '[Ascii]Doc']", render_string("We're parsing http://asciidoc.org[[Ascii\\]Doc] markup") + assert_xpath "//a[@href='http://asciidoc.org'][text() = '[Ascii]Doc']", convert_string("We're parsing http://asciidoc.org[[Ascii\\]Doc] markup") + end + + test 'qualified url with backslash label' do + assert_xpath "//a[@href='https://google.com'][text() = 'Google for \\']", convert_string("I advise you to https://google.com[Google for +\\+]") end test 'qualified url with label using link macro' do - assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", render_string("We're parsing link:http://asciidoc.org[AsciiDoc] markup") + assert_xpath "//a[@href='http://asciidoc.org'][text() = 
'AsciiDoc']", convert_string("We're parsing link:http://asciidoc.org[AsciiDoc] markup") + end + + test 'qualified url with role using link macro' do + assert_xpath "//a[@href='http://asciidoc.org'][@class='bare project'][text() = 'http://asciidoc.org']", convert_string("We're parsing link:http://asciidoc.org[role=project] markup") end test 'qualified url using macro syntax with multi-line label inline with text' do - assert_xpath %{//a[@href='http://asciidoc.org'][text() = 'AsciiDoc\nmarkup']}, render_string("We're parsing link:http://asciidoc.org[AsciiDoc\nmarkup]") + assert_xpath %{//a[@href='http://asciidoc.org'][text() = 'AsciiDoc\nmarkup']}, convert_string("We're parsing link:http://asciidoc.org[AsciiDoc\nmarkup]") end test 'qualified url with label containing square brackets using link macro' do str = 'http://example.com[[bracket1\]]' - doc = document_from_string str, :header_footer => false, :doctype => 'inline' + doc = document_from_string str, standalone: false, doctype: 'inline' assert_match '[bracket1]', doc.convert, 1 - doc = document_from_string str, :header_footer => false, :backend => 'docbook', :doctype => 'inline' + doc = document_from_string str, standalone: false, backend: 'docbook', doctype: 'inline' assert_match '[bracket1]', doc.convert, 1 - doc = document_from_string str, :header_footer => false, :backend => 'docbook45', :doctype => 'inline' - assert_match '[bracket1]', doc.convert, 1 + end + + test 'link macro with empty target' do + input = 'Link to link:[this page].' + output = convert_string_to_embedded input + assert_xpath '//a', output, 1 + assert_xpath '//a[@href=""]', output, 1 + end + + test 'should not recognize link macro with double colons' do + input = 'The link::http://example.org[example domain] is reserved for tests and documentation.' 
+ output = convert_string_to_embedded input + assert_includes output, 'link::http://example.org[example domain]' end test 'qualified url surrounded by angled brackets' do - assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', render_string('<http://asciidoc.org> is the project page for AsciiDoc.'), 1 + assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', convert_string('<http://asciidoc.org> is the project page for AsciiDoc.'), 1 end test 'qualified url surrounded by round brackets' do - assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', render_string('(http://asciidoc.org) is the project page for AsciiDoc.'), 1 + assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', convert_string('(http://asciidoc.org) is the project page for AsciiDoc.'), 1 end test 'qualified url with trailing round bracket' do - assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('Asciidoctor is a Ruby-based AsciiDoc processor (see http://asciidoctor.org)'), 1 + result = convert_string_to_embedded 'Asciidoctor is a Ruby-based AsciiDoc processor (see https://asciidoctor.org)' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,")")]', result, 1 end test 'qualified url with trailing semi-colon' do - assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('http://asciidoctor.org; where text gets parsed'), 1 + result = convert_string_to_embedded 'https://asciidoctor.org; where text gets parsed' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,";")]', result, 1 end test 'qualified url with trailing colon' do - assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('http://asciidoctor.org: where text gets parsed'), 1 + result = convert_string_to_embedded 'https://asciidoctor.org: where text gets parsed' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,":")]', result, 1 end test 'qualified url in round brackets with trailing colon' do - assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('(http://asciidoctor.org): where text gets parsed'), 1 + result = convert_string_to_embedded '(https://asciidoctor.org): where text gets parsed' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,"):")]', result, 1 + end + + test 'qualified url with trailing round bracket followed by colon' do + result = convert_string_to_embedded '(from https://asciidoctor.org): where text gets parsed' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(., "):")]', result, 1 + end + + test 'qualified url in round brackets with trailing semi-colon' do + result = 
convert_string_to_embedded '(https://asciidoctor.org); where text gets parsed' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(., ");")]', result, 1 + end + + test 'qualified url with trailing round bracket followed by semi-colon' do + result = convert_string_to_embedded '(from https://asciidoctor.org); where text gets parsed' + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 + assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(., ");")]', result, 1 + end + + test 'URI scheme with trailing characters should not be converted to a link' do + input_sources = %w( + (https://) + http://; + file://: + + ) + expected_outputs = %w( + (https://) + http://; + file://: + <ftp://> + ) + input_sources.each_with_index do |input_source, i| + expected_output = expected_outputs[i] + actual = block_from_string input_source + assert_equal expected_output, actual.content + end end test 'qualified url containing round brackets' do - assert_xpath '//a[@href="http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)"][text()="addModule() adds a Ruby module"]', render_string('http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)[addModule() adds a Ruby module]'), 1 + assert_xpath '//a[@href="http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)"][text()="addModule() adds a Ruby module"]', convert_string('http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)[addModule() adds a Ruby module]'), 1 end test 'qualified url adjacent to text in square brackets' do - assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', render_string(']http://asciidoc.org[AsciiDoc] project page.'), 1 + assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', convert_string(']http://asciidoc.org[AsciiDoc] project page.'), 1 end test 'qualified url adjacent to text in round brackets' do - assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', render_string(')http://asciidoc.org[AsciiDoc] project page.'), 1 + assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', convert_string(')http://asciidoc.org[AsciiDoc] project page.'), 1 + end + + test 'qualified url following no-break space' do + assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', convert_string(%(#{[0xa0].pack 'U1'}http://asciidoc.org[AsciiDoc] project page.)), 1 end test 'qualified url following smart apostrophe' do - output = render_embedded_string("l’http://www.irit.fr[IRIT]") + output = convert_string_to_embedded("l’http://www.irit.fr[IRIT]") assert_match(/l’'), 1 + assert_xpath '//a[@href="https://github.com/asciidoctor"]', convert_string('Asciidoctor GitHub organization: <**https://github.com/asciidoctor**>'), 1 + end + + test 'link with quoted text should not be separated into attributes when text contains an equal sign' do + assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing = Search Engines"]', convert_string_to_embedded('http://search.example.com["Google, Yahoo, Bing = Search Engines"]'), 1 + end + + test 'link with quoted text but no equal sign should carry quotes over to output' do + assert_xpath %(//a[@href="http://search.example.com"][text()='"Google, Yahoo, Bing"']), 
convert_string_to_embedded('http://search.example.com["Google, Yahoo, Bing"]'), 1 + end + + test 'link with comma in text but no equal sign should not be separated into attributes' do + assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing"]', convert_string_to_embedded('http://search.example.com[Google, Yahoo, Bing]'), 1 + end + + test 'role and window attributes on link are processed' do + assert_xpath '//a[@href="http://google.com"][@class="external"][@target="_blank"]', convert_string_to_embedded('http://google.com[Google, role=external, window="_blank"]'), 1 end - test 'link with quoted text should not be separated into attributes when linkattrs is set' do - assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing = Search Engines"]', render_embedded_string('http://search.example.com["Google, Yahoo, Bing = Search Engines"]', :attributes => {'linkattrs' => ''}), 1 + test 'link macro with attributes but no text should use URL as text' do + url = 'https://fonts.googleapis.com/css?family=Roboto:400,400italic,' + assert_xpath %(//a[@href="#{url}"][text()="#{url}"]), convert_string_to_embedded(%(link:#{url}[family=Roboto,weight=400])), 1 end - test 'link with comma in text but no equal sign should not be separated into attributes when linkattrs is set' do - assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing"]', render_embedded_string('http://search.example.com[Google, Yahoo, Bing]', :attributes => {'linkattrs' => ''}), 1 + test 'link macro with attributes but blank text should use URL as text' do + url = 'https://fonts.googleapis.com/css?family=Roboto:400,400italic,' + assert_xpath %(//a[@href="#{url}"][text()="#{url}"]), convert_string_to_embedded(%(link:#{url}[,family=Roboto,weight=400])), 1 end - test 'role and window attributes on link are processed when linkattrs is set' do - assert_xpath '//a[@href="http://google.com"][@class="external"][@target="_blank"]', render_embedded_string('http://google.com[Google, role="external", window="_blank"]', :attributes => {'linkattrs' => ''}), 1 + test 'link macro with comma but no explicit attributes in text should not parse text' do + url = 'https://fonts.googleapis.com/css?family=Roboto:400,400italic,' + assert_xpath %(//a[@href="#{url}"][text()="Roboto,400"]), convert_string_to_embedded(%(link:#{url}[Roboto,400])), 1 end test 'link text that ends in ^ should set link window to _blank' do - assert_xpath '//a[@href="http://google.com"][@target="_blank"]', render_embedded_string('http://google.com[Google^]'), 1 + assert_xpath '//a[@href="http://google.com"][@target="_blank"]', convert_string_to_embedded('http://google.com[Google^]'), 1 + end + + test 'rel=noopener should be added to a link that targets the _blank window' do + assert_xpath '//a[@href="http://google.com"][@target="_blank"][@rel="noopener"]', convert_string_to_embedded('http://google.com[Google^]'), 1 + end + + test 'rel=noopener should be added to a link that targets a named window when the noopener option is set' do + assert_xpath '//a[@href="http://google.com"][@target="name"][@rel="noopener"]', convert_string_to_embedded('http://google.com[Google,window=name,opts=noopener]'), 1 end - test 'id attribute on link are processed when linkattrs is set' do - assert_xpath '//a[@href="http://google.com"][@id="link-1"]', render_embedded_string('http://google.com[Google, id="link-1"]', :attributes => {'linkattrs' => ''}), 1 + test 'rel=noopener should not be added to a link if it does not target a window' do + 
result = convert_string_to_embedded 'http://google.com[Google,opts=noopener]' + assert_xpath '//a[@href="http://google.com"]', result, 1 + assert_xpath '//a[@href="http://google.com"][@rel="noopener"]', result, 0 end - test 'title attribute on link are processed when linkattrs is set' do - assert_xpath '//a[@href="http://google.com"][@title="title-1"]', render_embedded_string('http://google.com[Google, title="title-1"]', :attributes => {'linkattrs' => ''}), 1 + test 'rel=nofollow should be added to a link when the nofollow option is set' do + assert_xpath '//a[@href="http://google.com"][@target="name"][@rel="nofollow noopener"]', convert_string_to_embedded('http://google.com[Google,window=name,opts="nofollow,noopener"]'), 1 + end + + test 'id attribute on link is processed' do + assert_xpath '//a[@href="http://google.com"][@id="link-1"]', convert_string_to_embedded('http://google.com[Google, id="link-1"]'), 1 + end + + test 'title attribute on link is processed' do + assert_xpath '//a[@href="http://google.com"][@title="title-1"]', convert_string_to_embedded('http://google.com[Google, title="title-1"]'), 1 end test 'inline irc link' do - assert_xpath '//a[@href="irc://irc.freenode.net"][text()="irc://irc.freenode.net"]', render_embedded_string('irc://irc.freenode.net'), 1 + assert_xpath '//a[@href="irc://irc.freenode.net"][text()="irc://irc.freenode.net"]', convert_string_to_embedded('irc://irc.freenode.net'), 1 end test 'inline irc link with text' do - assert_xpath '//a[@href="irc://irc.freenode.net"][text()="Freenode IRC"]', render_embedded_string('irc://irc.freenode.net[Freenode IRC]'), 1 + assert_xpath '//a[@href="irc://irc.freenode.net"][text()="Freenode IRC"]', convert_string_to_embedded('irc://irc.freenode.net[Freenode IRC]'), 1 end test 'inline ref' do variations = %w([[tigers]] anchor:tigers[]) variations.each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}) - output = doc.render - assert_equal '[tigers]', doc.references[:ids]['tigers'] - assert_xpath '//a[@id = "tigers"]', output, 1 - assert_xpath '//a[@id = "tigers"]/child::text()', output, 0 + output = doc.convert + assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] + assert_nil doc.catalog[:refs]['tigers'].text + assert_xpath '//a[@id="tigers"]', output, 1 + assert_xpath '//a[@id="tigers"]/child::text()', output, 0 end end - test 'inline ref with reftext' do - variations = %w([[tigers,Tigers]] anchor:tigers[Tigers]) + test 'escaped inline ref' do + variations = %w([[tigers]] anchor:tigers[]) variations.each do |anchor| + doc = document_from_string %(Here you can read about tigers.\\#{anchor}) + output = doc.convert + refute doc.catalog[:refs].key?('tigers') + assert_xpath '//a[@id="tigers"]', output, 0 + end + end + + test 'inline ref can start with colon' do + input = '[[:idname]] text' + output = convert_string_to_embedded input + assert_xpath '//a[@id=":idname"]', output, 1 + end + + test 'inline ref cannot start with digit' do + input = '[[1-install]] text' + output = convert_string_to_embedded input + assert_includes output, '[[1-install]]' + assert_xpath '//a[@id = "1-install"]', output, 0 + end + + test 'inline ref with reftext' do + %w([[tigers,Tigers]] anchor:tigers[Tigers]).each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}) - output = doc.render - assert_equal 'Tigers', doc.references[:ids]['tigers'] - assert_xpath '//a[@id = "tigers"]', output, 1 - assert_xpath '//a[@id = "tigers"]/child::text()', output, 0 + output = doc.convert + 
assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] + assert_equal 'Tigers', doc.catalog[:refs]['tigers'].text + assert_xpath '//a[@id="tigers"]', output, 1 + assert_xpath '//a[@id="tigers"]/child::text()', output, 0 end end - test 'escaped inline ref' do - variations = %w([[tigers]] anchor:tigers[]) - variations.each do |anchor| - doc = document_from_string %(Here you can read about tigers.\\#{anchor}) - output = doc.render - assert !doc.references[:ids].has_key?('tigers') - assert_xpath '//a[@id = "tigers"]', output, 0 + test 'should encode double quotes in reftext of anchor macro in DocBook output' do + input = 'anchor:uncola[the "un"-cola]' + result = convert_inline_string input, backend: :docbook + assert_equal '', result + end + + test 'should substitute attribute references in reftext when registering inline ref' do + %w([[tigers,{label-tigers}]] anchor:tigers[{label-tigers}]).each do |anchor| + doc = document_from_string %(Here you can read about tigers.#{anchor}), attributes: { 'label-tigers' => 'Tigers' } + doc.convert + assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] + assert_equal 'Tigers', doc.catalog[:refs]['tigers'].text + end + end + + test 'inline ref with reftext converted to DocBook' do + %w([[tigers,]] anchor:tigers[]).each do |anchor| + doc = document_from_string %(Here you can read about tigers.#{anchor}), backend: :docbook + output = doc.convert standalone: false + assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] + assert_equal '', doc.catalog[:refs]['tigers'].text + assert_includes output, '' end end + test 'does not match bibliography anchor in prose when scanning for inline anchor' do + doc = document_from_string 'Use [[[label]]] to assign a label to a bibliography entry.' + refute doc.catalog[:refs].key? 
'label' + end + + test 'repeating inline anchor macro with empty reftext' do + input = 'anchor:one[] anchor:two[] anchor:three[]' + result = convert_inline_string input + assert_equal ' ', result + end + + test 'mixed inline anchor macro and anchor shorthand with empty reftext' do + input = 'anchor:one[][[two]]anchor:three[][[four]]anchor:five[]' + result = convert_inline_string input + assert_equal '', result + end + + test 'assigns xreflabel value for anchor macro without reftext in DocBook output' do + ['anchor:foo[]bar', '[[foo]]bar'].each do |input| + result = convert_inline_string input, backend: :docbook + assert_equal 'bar', result + end + end + + test 'unescapes square bracket in reftext of anchor macro' do + input = <<~'EOS' + see <> + + anchor:foo[b[a\]r]tex' + EOS + result = convert_string_to_embedded input + assert_includes result, 'see b[a]r' + end + + test 'unescapes square bracket in reftext of anchor macro in DocBook output' do + input = 'anchor:foo[b[a\]r]' + result = convert_inline_string input, backend: :docbook + assert_equal '', result + end + test 'xref using angled bracket syntax' do doc = document_from_string '<>' - doc.references[:ids]['tigers'] = '[tigers]' - assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.render, 1 + doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, '[tigers]', type: :ref, target: 'tigers'), '[tigers]'] + assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.convert, 1 + end + + test 'xref using angled bracket syntax with explicit hash' do + doc = document_from_string '<<#tigers>>' + doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, 'Tigers', type: :ref, target: 'tigers'), 'Tigers'] + assert_xpath '//a[@href="#tigers"][text() = "Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with label' do - assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', render_string('<>'), 1 + input = <<~'EOS' + <> + + [#tigers] + == Tigers + EOS + assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', convert_string(input), 1 end test 'xref using angled bracket syntax with quoted label' do - assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', render_string('<>'), 1 + input = <<~'EOS' + <> + + [#tigers] + == Tigers + EOS + assert_xpath %q(//a[@href="#tigers"][text() = '"About Tigers"']), convert_string(input), 1 + end + + test 'should not interpret path sans extension in xref with angled bracket syntax in compat mode' do + using_memory_logger do |logger| + doc = document_from_string '<>', standalone: false, attributes: { 'compat-mode' => '' } + assert_xpath '//a[@href="#tigers#"][text() = "[tigers#]"]', doc.convert, 1 + end end test 'xref using angled bracket syntax with path sans extension' do - doc = document_from_string '<>', :header_footer => false - assert_xpath '//a[@href="tigers.html"][text() = "[tigers]"]', doc.render, 1 + doc = document_from_string '<>', standalone: false + assert_xpath '//a[@href="tigers.html"][text() = "tigers.html"]', doc.convert, 1 + end + + test 'inter-document xref shorthand syntax should assume AsciiDoc extension if AsciiDoc extension not present' do + { + 'using-.net-web-services#' => 'Using .NET web services', + 'asciidoctor.1#' => 'Asciidoctor Manual', + 'path/to/document#' => 'Document Title', + }.each do |target, text| + result = convert_string_to_embedded %(<<#{target},#{text}>>) + assert_xpath %(//a[@href="#{target.chop}.html"][text()="#{text}"]), result, 1 + end + end + + test 'xref macro with explicit inter-document 
target should assume implicit AsciiDoc file extension if no file extension is present' do + { + 'using-.net-web-services#' => 'Using .NET web services', + 'asciidoctor.1#' => 'Asciidoctor Manual', + }.each do |target, text| + result = convert_string_to_embedded %(xref:#{target}[#{text}]) + assert_xpath %(//a[@href="#{target.chop}"][text()="#{text}"]), result, 1 + end + { + 'document#' => 'Document Title', + 'path/to/document#' => 'Document Title', + 'include.d/document#' => 'Document Title', + }.each do |target, text| + result = convert_string_to_embedded %(xref:#{target}[#{text}]) + assert_xpath %(//a[@href="#{target.chop}.html"][text()="#{text}"]), result, 1 + end + end + + test 'xref macro with implicit inter-document target should preserve path with file extension' do + { + 'refcard.pdf' => 'Refcard', + 'asciidoctor.1' => 'Asciidoctor Manual', + }.each do |path, text| + result = convert_string_to_embedded %(xref:#{path}[#{text}]) + assert_xpath %(//a[@href="#{path}"][text()="#{text}"]), result, 1 + end + { + 'sections.d/first' => 'First Section', + }.each do |path, text| + result = convert_string_to_embedded %(xref:#{path}[#{text}]) + assert_xpath %(//a[@href="##{path}"][text()="#{text}"]), result, 1 + end + end + + test 'inter-document xref should only remove the file extension part if the path contains a period elsewhere' do + result = convert_string_to_embedded '<>' + assert_xpath '//a[@href="using-.net-web-services.html"][text() = "Using .NET web services"]', result, 1 + end + + test 'xref macro target containing dot should be interpreted as a path unless prefixed by #' do + result = convert_string_to_embedded 'xref:using-.net-web-services[Using .NET web services]' + assert_xpath '//a[@href="using-.net-web-services"][text() = "Using .NET web services"]', result, 1 + result = convert_string_to_embedded 'xref:#using-.net-web-services[Using .NET web services]' + assert_xpath '//a[@href="#using-.net-web-services"][text() = "Using .NET web services"]', result, 1 end test 'xref using angled bracket syntax with path sans extension using docbook backend' do - doc = document_from_string '<>', :header_footer => false, :backend => 'docbook' - assert_match 'tigers.xml', doc.render, 1 - doc = document_from_string '<>', :header_footer => false, :backend => 'docbook45' - assert_match 'tigers.xml', doc.render, 1 + doc = document_from_string '<>', standalone: false, backend: 'docbook' + assert_match 'tigers.xml', doc.convert, 1 end test 'xref using angled bracket syntax with ancestor path sans extension' do - doc = document_from_string '<<../tigers#,tigers>>', :header_footer => false - assert_xpath '//a[@href="../tigers.html"][text() = "tigers"]', doc.render, 1 + doc = document_from_string '<<../tigers#,tigers>>', standalone: false + assert_xpath '//a[@href="../tigers.html"][text() = "tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with absolute path sans extension' do - doc = document_from_string '<>', :header_footer => false - assert_xpath '//a[@href="/path/to/tigers.html"][text() = "tigers"]', doc.render, 1 + doc = document_from_string '<>', standalone: false + assert_xpath '//a[@href="/path/to/tigers.html"][text() = "tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and extension' do - doc = document_from_string '<>', :header_footer => false - assert_xpath '//a[@href="tigers.html"][text() = "[tigers]"]', doc.render, 1 + using_memory_logger do |logger| + doc = document_from_string '<>', standalone: false + assert_xpath 
    '//a[@href="#tigers.adoc"][text() = "[tigers.adoc]"]', doc.convert, 1 + end + + test 'xref using angled bracket syntax with path and extension with hash' do + doc = document_from_string '<<tigers.adoc#>>', standalone: false + assert_xpath '//a[@href="tigers.html"][text() = "tigers.html"]', doc.convert, 1 + end + + test 'xref using angled bracket syntax with path and extension with fragment' do + doc = document_from_string '<<tigers.adoc#id>>', standalone: false + assert_xpath '//a[@href="tigers.html#id"][text() = "tigers.html"]', doc.convert, 1 + end + + test 'xref using macro syntax with path and extension in compat mode' do + using_memory_logger do |logger| + doc = document_from_string 'xref:tigers.adoc[]', standalone: false, attributes: { 'compat-mode' => '' } + assert_xpath '//a[@href="#tigers.adoc"][text() = "[tigers.adoc]"]', doc.convert, 1 + end + end + + test 'xref using macro syntax with path and extension' do + doc = document_from_string 'xref:tigers.adoc[]', standalone: false + assert_xpath '//a[@href="tigers.html"][text() = "tigers.html"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and fragment' do - doc = document_from_string '<<tigers#about>>', :header_footer => false - assert_xpath '//a[@href="tigers.html#about"][text() = "[tigers#about]"]', doc.render, 1 + doc = document_from_string '<<tigers#about>>', standalone: false + assert_xpath '//a[@href="tigers.html#about"][text() = "tigers.html"]', doc.convert, 1 end test 'xref using angled bracket syntax with path, fragment and text' do - doc = document_from_string '<<tigers#about,About Tigers>>', :header_footer => false - assert_xpath '//a[@href="tigers.html#about"][text() = "About Tigers"]', doc.render, 1 + doc = document_from_string '<<tigers#about,About Tigers>>', standalone: false + assert_xpath '//a[@href="tigers.html#about"][text() = "About Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and custom relfilesuffix and outfilesuffix' do - attributes = {'relfileprefix' => '../', 'outfilesuffix' => '/'} - doc = document_from_string '<<tigers#about,About Tigers>>', :header_footer => false, :attributes => attributes - assert_xpath '//a[@href="../tigers/#about"][text() = "About Tigers"]', doc.render, 1 + attributes = { 'relfileprefix' => '../', 'outfilesuffix' => '/' } + doc = document_from_string '<<tigers#about,About Tigers>>', standalone: false, attributes: attributes + assert_xpath '//a[@href="../tigers/#about"][text() = "About Tigers"]', doc.convert, 1 + end + + test 'xref using angled bracket syntax with path and custom relfilesuffix' do + attributes = { 'relfilesuffix' => '/' } + doc = document_from_string '<<tigers#about,About Tigers>>', standalone: false, attributes: attributes + assert_xpath '//a[@href="tigers/#about"][text() = "About Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path which has been included in this document' do - doc = document_from_string '<<tigers#about,About Tigers>>', :header_footer => false - doc.references[:includes] << 'tigers' - assert_xpath '//a[@href="#about"][text() = "About Tigers"]', doc.render, 1 + using_memory_logger do |logger| + in_verbose_mode do + doc = document_from_string '<<tigers#about,About Tigers>>', standalone: false + doc.catalog[:includes]['tigers'] = true + output = doc.convert + assert_xpath '//a[@href="#about"][text() = "About Tigers"]', output, 1 + assert_message logger, :INFO, 'possible invalid reference: about' + end + end end test 'xref using angled bracket syntax with nested path which has been included in this document' do - doc = document_from_string '<<part1/tigers#about,About Tigers>>', :header_footer => false - doc.references[:includes] << 'part1/tigers' - assert_xpath '//a[@href="#about"][text() = "About Tigers"]', doc.render, 1 + 
using_memory_logger do |logger| + in_verbose_mode do + doc = document_from_string '<>', standalone: false + doc.catalog[:includes]['part1/tigers'] = true + output = doc.convert + assert_xpath '//a[@href="#about"][text() = "About Tigers"]', output, 1 + assert_message logger, :INFO, 'possible invalid reference: about' + end + end end test 'xref using angled bracket syntax inline with text' do - assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', render_string('Want to learn <>?'), 1 + input = <<~'EOS' + Want to learn <>? + + [#tigers] + == Tigers + EOS + assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', convert_string(input), 1 end test 'xref using angled bracket syntax with multi-line label inline with text' do - assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, render_string("Want to learn <>?"), 1 + input = <<~'EOS' + Want to learn <>? + + [#tigers] + == Tigers + EOS + assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, convert_string(input), 1 end test 'xref with escaped text' do # when \x0 was used as boundary character for passthrough, it was getting stripped # now using unicode marks as boundary characters, which resolves issue - input = 'See the <> section for data about tigers' - output = render_embedded_string input + input = <<~'EOS' + See the <> section for details about tigers. + + [#tigers] + == Tigers + EOS + output = convert_string_to_embedded input assert_xpath %(//a[@href="#tigers"]/code[text()="[tigers]"]), output, 1 end + test 'xref with target that begins with attribute reference in title' do + ['<<{lessonsdir}/lesson-1#,Lesson 1>>', 'xref:{lessonsdir}/lesson-1.adoc[Lesson 1]'].each do |xref| + input = <<~EOS + :lessonsdir: lessons + + [#lesson-1-listing] + == #{xref} + + A summary of the first lesson. + EOS + + output = convert_string_to_embedded input + assert_xpath '//h2/a[@href="lessons/lesson-1.html"]', output, 1 + end + end + test 'xref using macro syntax' do doc = document_from_string 'xref:tigers[]' - doc.references[:ids]['tigers'] = '[tigers]' - assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.render, 1 + doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, '[tigers]', type: :ref, target: 'tigers'), '[tigers]'] + assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.convert, 1 + end + + test 'multiple xref macros with implicit text in single line' do + input = <<~'EOS' + This document has two sections, xref:sect-a[] and xref:sect-b[]. 
+ + [#sect-a] + == Section A + + [#sect-b] + == Section B + EOS + result = convert_string_to_embedded input + assert_xpath '//a[@href="#sect-a"][text() = "Section A"]', result, 1 + assert_xpath '//a[@href="#sect-b"][text() = "Section B"]', result, 1 + end + + test 'xref using macro syntax with explicit hash' do + doc = document_from_string 'xref:#tigers[]' + doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, 'Tigers', type: :ref, target: 'tigers'), 'Tigers'] + assert_xpath '//a[@href="#tigers"][text() = "Tigers"]', doc.convert, 1 end test 'xref using macro syntax with label' do - assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', render_string('xref:tigers[About Tigers]'), 1 + input = <<~'EOS' + xref:tigers[About Tigers] + + [#tigers] + == Tigers + EOS + assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', convert_string(input), 1 end test 'xref using macro syntax inline with text' do - assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', render_string('Want to learn xref:tigers[about tigers]?'), 1 + input = <<~'EOS' + Want to learn xref:tigers[about tigers]? + + [#tigers] + == Tigers + EOS + + assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', convert_string(input), 1 end test 'xref using macro syntax with multi-line label inline with text' do - assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, render_string("Want to learn xref:tigers[about\ntigers]?"), 1 + input = <<~'EOS' + Want to learn xref:tigers[about + tigers]? + + [#tigers] + == Tigers + EOS + assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, convert_string(input), 1 + end + + test 'xref using macro syntax with text that ends with an escaped closing bracket' do + input = <<~'EOS' + xref:tigers[[tigers\]] + + [#tigers] + == Tigers + EOS + assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', convert_string_to_embedded(input), 1 + end + + test 'xref using macro syntax with text that contains an escaped closing bracket' do + input = <<~'EOS' + xref:tigers[[tigers\] are cats] + + [#tigers] + == Tigers + EOS + assert_xpath '//a[@href="#tigers"][text() = "[tigers] are cats"]', convert_string_to_embedded(input), 1 + end + + test 'unescapes square bracket in reftext used by xref' do + input = <<~'EOS' + anchor:foo[b[a\]r]about + + see <> + EOS + result = convert_string_to_embedded input + assert_xpath '//a[@href="#foo"]', result, 1 + assert_xpath '//a[@href="#foo"][text()="b[a]r"]', result, 1 end test 'xref using invalid macro syntax does not create link' do doc = document_from_string 'xref:tigers' - doc.references[:ids]['tigers'] = '[tigers]' - assert_xpath '//a', doc.render, 0 + doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, 'Tigers', type: :ref, target: 'tigers'), 'Tigers'] + assert_xpath '//a', doc.convert, 0 end - test 'xref creates link for unknown reference' do - doc = document_from_string '<>' - assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.render, 1 + test 'should warn and create link if verbose flag is set and reference is not found' do + input = <<~'EOS' + [#foobar] + == Foobar + + == Section B + + See <>. 
+ EOS + using_memory_logger do |logger| + in_verbose_mode do + output = convert_string_to_embedded input + assert_xpath '//a[@href="#foobaz"][text() = "[foobaz]"]', output, 1 + assert_message logger, :INFO, 'possible invalid reference: foobaz' + end + end + end + + test 'should warn and create link if verbose flag is set and reference using # notation is not found' do + input = <<~'EOS' + [#foobar] + == Foobar + + == Section B + + See <<#foobaz>>. + EOS + using_memory_logger do |logger| + in_verbose_mode do + output = convert_string_to_embedded input + assert_xpath '//a[@href="#foobaz"][text() = "[foobaz]"]', output, 1 + assert_message logger, :INFO, 'possible invalid reference: foobaz' + end + end + end + + test 'should produce an internal anchor from an inter-document xref to file included into current file' do + input = <<~'EOS' + = Book Title + :doctype: book + + [#ch1] + == Chapter 1 + + So it begins. + + Read <> to find out what happens next! + + include::other-chapters.adoc[] + EOS + + doc = document_from_string input, safe: :safe, base_dir: fixturedir + assert doc.catalog[:includes].key?('other-chapters') + assert doc.catalog[:includes]['other-chapters'] + output = doc.convert + assert_xpath '//a[@href="#ch2"][text()="Chapter 2"]', output, 1 + end + + test 'should produce an internal anchor from an inter-document xref to file included entirely into current file using tags' do + input = <<~'EOS' + = Book Title + :doctype: book + + [#ch1] + == Chapter 1 + + So it begins. + + Read <> to find out what happens next! + + include::other-chapters.adoc[tags=**] + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: fixturedir + assert_xpath '//a[@href="#ch2"][text()="Chapter 2"]', output, 1 + end + + test 'should not produce an internal anchor for inter-document xref to file partially included into current file' do + input = <<~'EOS' + = Book Title + :doctype: book + + [#ch1] + == Chapter 1 + + So it begins. + + Read <> to find out what happens next! + + include::other-chapters.adoc[tags=ch2] + EOS + + doc = document_from_string input, safe: :safe, base_dir: fixturedir + assert doc.catalog[:includes].key?('other-chapters') + refute doc.catalog[:includes]['other-chapters'] + output = doc.convert + assert_xpath '//a[@href="other-chapters.html#ch2"][text()="the next chapter"]', output, 1 + end + + test 'should warn and create link if debug mode is enabled, inter-document xref points to current doc, and reference not found' do + input = <<~'EOS' + [#foobar] + == Foobar + + == Section B + + See <>. + EOS + using_memory_logger do |logger| + in_verbose_mode do + output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } + assert_xpath '//a[@href="#foobaz"][text() = "[foobaz]"]', output, 1 + assert_message logger, :INFO, 'possible invalid reference: foobaz' + end + end + end + + test 'should produce an internal anchor for inter-document xref to file outside of base directory' do + input = <<~'EOS' + = Document Title + + See <<../section-a.adoc#section-a>>. 
+ + include::../section-a.adoc[] + EOS + + doc = document_from_string input, safe: :unsafe, base_dir: (File.join fixturedir, 'subdir') + assert_includes doc.catalog[:includes], '../section-a' + output = doc.convert standalone: false + assert_xpath '//a[@href="#section-a"][text()="Section A"]', output, 1 end - test 'xref shows label from title of target for forward and backward references in html backend' do - input = <<-EOS -== Section A + test 'xref uses title of target as label for forward and backward references in html output' do + input = <<~'EOS' + == Section A -<\<_section_b>> + <<_section_b>> -== Section B + == Section B -<\<_section_a>> + <<_section_a>> EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//h2[@id="_section_a"][text()="Section A"]', output, 1 assert_xpath '//a[@href="#_section_a"][text()="Section A"]', output, 1 assert_xpath '//h2[@id="_section_b"][text()="Section B"]', output, 1 assert_xpath '//a[@href="#_section_b"][text()="Section B"]', output, 1 end + test 'should not fail to resolve broken xref in title of block with ID' do + input = <<~'EOS' + [#p1] + .<> + paragraph text + EOS + + output = convert_string_to_embedded input + assert_xpath '//*[@class="title"]/a[@href="#DNE"][text()="[DNE]"]', output, 1 + end + + test 'should resolve forward xref in title of block with ID' do + input = <<~'EOS' + [#p1] + .<> + paragraph text + + [#conclusion] + == Conclusion + EOS + + output = convert_string_to_embedded input + assert_xpath '//*[@class="title"]/a[@href="#conclusion"][text()="Conclusion"]', output, 1 + end + + test 'should not fail to resolve broken xref in section title' do + input = <<~'EOS' + [#s1] + == <> + + == <> + EOS + + # NOTE this output is nonsensical, but we still need to verify the scenario + output = convert_string_to_embedded input + assert_xpath '//a[@href="#DNE"][text()="[DNE]"]', output, 2 + end + + test 'should not resolve forward xref evaluated during parsing' do + input = <<~'EOS' + [#s1] + == <> + + == <> + + [#forward] + == Forward + EOS + + output = convert_string_to_embedded input + assert_xpath '//a[@href="#forward"][text()="Forward"]', output, 0 + end + + test 'should not resolve forward natural xref evaluated during parsing' do + input = <<~'EOS' + :idprefix: + + [#s1] + == <> + + == <> + + == Forward + EOS + + output = convert_string_to_embedded input + assert_xpath '//a[@href="#forward"][text()="Forward"]', output, 0 + end + + test 'should resolve first matching natural xref' do + input = <<~'EOS' + see <
    > + + [#s1] + == Section Title + + [#s2] + == Section Title + EOS + + output = convert_string_to_embedded input + assert_xpath '//a[@href="#s1"]', output, 1 + assert_xpath '//a[@href="#s1"][text()="Section Title"]', output, 1 + end + test 'anchor creates reference' do - doc = document_from_string "[[tigers]]Tigers roam here." - assert_equal({'tigers' => '[tigers]'}, doc.references[:ids]) + doc = document_from_string '[[tigers]]Tigers roam here.' + ref = doc.catalog[:refs]['tigers'] + refute_nil ref + assert_nil ref.reftext end test 'anchor with label creates reference' do - doc = document_from_string "[[tigers,Tigers]]Tigers roam here." - assert_equal({'tigers' => 'Tigers'}, doc.references[:ids]) + doc = document_from_string '[[tigers,Tigers]]Tigers roam here.' + ref = doc.catalog[:refs]['tigers'] + refute_nil ref + assert_equal 'Tigers', ref.reftext end test 'anchor with quoted label creates reference with quoted label text' do doc = document_from_string %([[tigers,"Tigers roam here"]]Tigers roam here.) - assert_equal({'tigers' => '"Tigers roam here"'}, doc.references[:ids]) + ref = doc.catalog[:refs]['tigers'] + refute_nil ref + assert_equal '"Tigers roam here"', ref.reftext end test 'anchor with label containing a comma creates reference' do doc = document_from_string %([[tigers,Tigers, scary tigers, roam here]]Tigers roam here.) - assert_equal({'tigers' => 'Tigers, scary tigers, roam here'}, doc.references[:ids]) + ref = doc.catalog[:refs]['tigers'] + refute_nil ref + assert_equal 'Tigers, scary tigers, roam here', ref.reftext end end diff -Nru asciidoctor-1.5.5/test/lists_test.rb asciidoctor-2.0.10/test/lists_test.rb --- asciidoctor-1.5.5/test/lists_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/lists_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,113 +1,110 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! 
- require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context "Bulleted lists (:ulist)" do context "Simple lists" do test "dash elements with no blank lines" do - input = <<-EOS -List -==== - -- Foo -- Boo -- Blech + input = <<~'EOS' + List + ==== + + - Foo + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented dash elements using spaces' do - input = <<-EOS - - Foo - - Boo - - Blech + input = <<~EOS + \x20- Foo + \x20- Boo + \x20- Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented dash elements using tabs' do - input = <<-EOS -\t-\tFoo -\t-\tBoo -\t-\tBlech + input = <<~EOS + \t-\tFoo + \t-\tBoo + \t-\tBlech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test "dash elements separated by blank lines should merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -- Foo + - Foo -- Boo + - Boo -- Blech + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'dash elements with interspersed line comments should be skipped and not break list' do - input = <<-EOS -== List + input = <<~'EOS' + == List -- Foo -// line comment -// another line comment -- Boo -// line comment -more text -// another line comment -- Blech + - Foo + // line comment + // another line comment + - Boo + // line comment + more text + // another line comment + - Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath %((//ul/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "dash elements separated by a line comment offset by blank lines should not merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -- Foo -- Boo + - Foo + - Boo -// + // -- Blech + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "dash elements separated by a block title offset by a blank line should not merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -- Foo -- Boo + - Foo + - Boo -.Also -- Blech + .Also + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 @@ -115,131 +112,132 @@ end test "dash elements separated by an attribute entry offset by a blank line should not merge lists" do - input = <<-EOS -== List + input = <<~'EOS' + == List -- Foo -- Boo + - Foo + - Boo -:foo: bar -- Blech + :foo: bar + - Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test 'a non-indented wrapped line is folded into text of list item' do - input = <<-EOS -List -==== - -- Foo -wrapped content -- Boo -- Blech + input = <<~'EOS' + List + ==== + + - Foo + wrapped content + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 
assert_xpath "//ul/li[1]/p[text() = 'Foo\nwrapped content']", output, 1 end test 'a non-indented wrapped line that resembles a block title is folded into text of list item' do - input = <<-EOS -== List + input = <<~'EOS' + == List -- Foo -.wrapped content -- Boo -- Blech + - Foo + .wrapped content + - Boo + - Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\n.wrapped content']", output, 1 end test 'a non-indented wrapped line that resembles an attribute entry is folded into text of list item' do - input = <<-EOS -== List + input = <<~'EOS' + == List -- Foo -:foo: bar -- Boo -- Blech + - Foo + :foo: bar + - Boo + - Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\n:foo: bar']", output, 1 end test 'a list item with a nested marker terminates non-indented paragraph for text of list item' do - input = <<-EOS -- Foo -Bar -* Foo + input = <<~'EOS' + - Foo + Bar + * Foo EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul ul', output, 1 - assert !output.include?('* Foo') + refute_includes output, '* Foo' end test 'a list item for a different list terminates non-indented paragraph for text of list item' do - input = <<-EOS -== Example 1 + input = <<~'EOS' + == Example 1 -- Foo -Bar -. Foo + - Foo + Bar + . Foo -== Example 2 + == Example 2 -* Item -text -term:: def + * Item + text + term:: def EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul ol', output, 1 - assert !output.include?('* Foo') + refute_includes output, '* Foo' assert_css 'ul dl', output, 1 - assert !output.include?('term:: def') + refute_includes output, 'term:: def' end test 'an indented wrapped line is unindented and folded into text of list item' do - input = <<-EOS -List -==== - -- Foo - wrapped content -- Boo -- Blech + input = <<~'EOS' + List + ==== + + - Foo + wrapped content + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\nwrapped content']", output, 1 end test 'wrapped list item with hanging indent followed by non-indented line' do - input = <<-EOS -== Lists - -- list item 1 - // not line comment -second wrapped line -- list item 2 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists + + - list item 1 + // not line comment + second wrapped line + - list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li', output, 2 # NOTE for some reason, we're getting an extra line after the indented line - lines = xmlnodes_at_xpath('(//ul/li)[1]/p', output, 1).text.gsub(/\n[[:space:]]*\n/, "\n").lines.entries + lines = xmlnodes_at_xpath('(//ul/li)[1]/p', output, 1).text.gsub(/\n[[:space:]]*\n/, ?\n).lines assert_equal 3, lines.size assert_equal 'list item 1', lines[0].chomp assert_equal ' // not line comment', lines[1].chomp @@ -247,64 +245,67 @@ end test 'a list item with a nested marker terminates indented paragraph for text of list item' do - input = <<-EOS -- Foo - Bar -* Foo + # NOTE cannot use single-quoted heredoc because 
of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + - Foo + Bar + * Foo EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul ul', output, 1 - assert !output.include?('* Foo') + refute_includes output, '* Foo' end test 'a list item that starts with a sequence of list markers characters should not match a nested list' do - input = <<-EOS - * first item - *. normal text + input = <<~EOS + \x20* first item + \x20*. normal text EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li', output, 1 assert_xpath "//ul/li/p[text()='first item\n*. normal text']", output, 1 end test 'a list item for a different list terminates indented paragraph for text of list item' do - input = <<-EOS -== Example 1 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Example 1 -- Foo - Bar -. Foo + - Foo + Bar + . Foo -== Example 2 + == Example 2 -* Item - text -term:: def + * Item + text + term:: def EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul ol', output, 1 - assert !output.include?('* Foo') + refute_includes output, '* Foo' assert_css 'ul dl', output, 1 - assert !output.include?('term:: def') + refute_includes output, 'term:: def' end test "a literal paragraph offset by blank lines in list content is appended as a literal block" do - input = <<-EOS -List -==== + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + List + ==== -- Foo + - Foo - literal + literal -- Boo -- Blech + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 @@ -313,21 +314,43 @@ assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 end + test 'should escape special characters in all literal paragraphs attached to list item' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * first item + + text + + more text + + * second item + EOS + + output = convert_string_to_embedded input + assert_css 'li', output, 2 + assert_css 'code', output, 0 + assert_css 'li:first-of-type > *', output, 3 + assert_css 'li:first-of-type pre', output, 2 + assert_xpath '((//li)[1]//pre)[1][text()="text"]', output, 1 + assert_xpath '((//li)[1]//pre)[2][text()="more text"]', output, 1 + end + test "a literal paragraph offset by a blank line in list content followed by line with continuation is appended as two blocks" do - input = <<-EOS -List -==== + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + List + ==== -- Foo + - Foo - literal -+ -para + literal + + + para -- Boo -- Blech + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 @@ -339,14 +362,15 @@ end test 'an admonition paragraph attached by a line continuation to a list item with wrapped text should produce admonition' do - input = <<-EOS -- first-line text - wrapped text -+ -NOTE: This is a note. 
+ # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + - first-line text + wrapped text + + + NOTE: This is a note. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul > li', output, 1 assert_css 'ul > li > p', output, 1 @@ -355,16 +379,82 @@ assert_xpath '//ul/li/*[@class="admonitionblock note"]//td[@class="content"][normalize-space(text())="This is a note."]', output, 1 end + test 'paragraph-like blocks attached to an ancestory list item by a list continuation should produce blocks' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * parent + ** child + + + + NOTE: This is a note. + + * another parent + ** another child + + + + ''' + EOS + + output = convert_string_to_embedded input + assert_css 'ul ul .admonitionblock.note', output, 0 + assert_xpath '(//ul)[1]/li/*[@class="admonitionblock note"]', output, 1 + assert_css 'ul ul hr', output, 0 + assert_xpath '(//ul)[1]/li/hr', output, 1 + end + + test 'should not inherit block attributes from previous block when block is attached using a list continuation' do + input = <<~'EOS' + * complex list item + + + [source,xml] + ---- + value + ---- + <1> a configuration value + EOS + + doc = document_from_string input + colist = doc.blocks[0].items[0].blocks[-1] + assert_equal :colist, colist.context + refute_equal 'source', colist.style + output = doc.convert standalone: false + assert_css 'ul', output, 1 + assert_css 'ul > li', output, 1 + assert_css 'ul > li > p', output, 1 + assert_css 'ul > li > .listingblock', output, 1 + assert_css 'ul > li > .colist', output, 1 + end + + test 'should continue to parse blocks attached by a list continuation after block is dropped' do + input = <<~'EOS' + * item + + + paragraph + + + [comment] + comment + + + ==== + example + ==== + ''' + EOS + + output = convert_string_to_embedded input + assert_css 'ul > li > .paragraph', output, 1 + assert_css 'ul > li > .exampleblock', output, 1 + end + test 'appends line as paragraph if attached by continuation following line comment' do - input = <<-EOS -- list item 1 -// line comment -+ -paragraph in list item 1 + input = <<~'EOS' + - list item 1 + // line comment + + + paragraph in list item 1 -- list item 2 + - list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li', output, 2 assert_xpath '(//ul/li)[1]/p[text()="list item 1"]', output, 1 @@ -374,17 +464,18 @@ end test "a literal paragraph with a line that appears as a list item that is followed by a continuation should create two blocks" do - input = <<-EOS -* Foo -+ - literal -. still literal -+ -para + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * Foo + + + literal + . 
still literal + + + para -* Bar + * Bar EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 @@ -396,42 +487,44 @@ end test "consecutive literal paragraph offset by blank lines in list content are appended as a literal blocks" do - input = <<-EOS -List -==== + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + List + ==== -- Foo + - Foo - literal + literal - more - literal + more + literal -- Boo -- Blech + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 2 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 2 - assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 - assert_xpath "((//ul/li)[1]/*[@class='literalblock'])[2]//pre[text() = 'more\nliteral']", output, 1 + assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text()="literal"]', output, 1 + assert_xpath "((//ul/li)[1]/*[@class='literalblock'])[2]//pre[text()='more\nliteral']", output, 1 end test "a literal paragraph without a trailing blank line consumes following list items" do - input = <<-EOS -List -==== - -- Foo - - literal -- Boo -- Blech + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + List + ==== + + - Foo + + literal + - Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 1 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 @@ -441,132 +534,132 @@ end test "asterisk elements with no blank lines" do - input = <<-EOS -List -==== - -* Foo -* Boo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + * Boo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented asterisk elements using spaces' do - input = <<-EOS - * Foo - * Boo - * Blech + input = <<~EOS + \x20* Foo + \x20* Boo + \x20* Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented unicode bullet elements using spaces' do - input = <<-EOS - • Foo - • Boo - • Blech + input = <<~EOS + \x20• Foo + \x20• Boo + \x20• Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 - end if ::RUBY_MIN_VERSION_1_9 + end test 'indented asterisk elements using tabs' do - input = <<-EOS -\t*\tFoo -\t*\tBoo -\t*\tBlech + input = <<~EOS + \t*\tFoo + \t*\tBoo + \t*\tBlech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'should represent block style as style class' do ['disc', 'square', 'circle'].each do |style| - input = <<-EOS -[#{style}] -* a -* b -* c + input = <<~EOS + [#{style}] + * a + * b + * c EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css ".ulist.#{style}", output, 1 assert_css ".ulist.#{style} ul.#{style}", output, 1 end end test "asterisk elements separated by blank lines should merge lists" do - input = 
<<-EOS -List -==== + input = <<~'EOS' + List + ==== -* Foo + * Foo -* Boo + * Boo -* Blech + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'asterisk elements with interspersed line comments should be skipped and not break list' do - input = <<-EOS -== List + input = <<~'EOS' + == List -* Foo -// line comment -// another line comment -* Boo -// line comment -more text -// another line comment -* Blech + * Foo + // line comment + // another line comment + * Boo + // line comment + more text + // another line comment + * Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath %((//ul/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "asterisk elements separated by a line comment offset by blank lines should not merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -* Foo -* Boo + * Foo + * Boo -// + // -* Blech + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "asterisk elements separated by a block title offset by a blank line should not merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -* Foo -* Boo + * Foo + * Boo -.Also -* Blech + .Also + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 @@ -574,85 +667,109 @@ end test "asterisk elements separated by an attribute entry offset by a blank line should not merge lists" do - input = <<-EOS -== List + input = <<~'EOS' + == List -* Foo -* Boo + * Foo + * Boo -:foo: bar -* Blech + :foo: bar + * Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "list should terminate before next lower section heading" do - input = <<-EOS -List -==== - -* first -item -* second -item + input = <<~'EOS' + List + ==== + + * first + item + * second + item -== Section + == Section EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//h2[text() = "Section"]', output, 1 end test "list should terminate before next lower section heading with implicit id" do - input = <<-EOS -List -==== - -* first -item -* second -item + input = <<~'EOS' + List + ==== + + * first + item + * second + item -[[sec]] -== Section + [[sec]] + == Section EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//h2[@id = "sec"][text() = "Section"]', output, 1 end test 'should not find section title immediately below last list item' do - input = <<-EOS -* first -* second -== Not a section + input = <<~'EOS' + * first + * second + == Not a section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul > li', output, 2 assert_css 'h2', output, 0 - assert output.include?('== Not a section') + assert_includes output, '== Not a section' assert_xpath %((//li)[2]/p[text() = "second\n== Not a section"]), output, 1 end + + test 'should match trailing 
line separator in text of list item' do + input = <<~EOS.chop + * a + * b#{decode_char 8232} + * c + EOS + + output = convert_string input + assert_css 'li', output, 3 + assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}"]), output, 1 + end + + test 'should match line separator in text of list item' do + input = <<~EOS.chop + * a + * b#{decode_char 8232}b + * c + EOS + + output = convert_string input + assert_css 'li', output, 3 + assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 + end end context "Lists with inline markup" do test "quoted text" do - input = <<-EOS -List -==== - -- I am *strong*. -- I am _stressed_. -- I am `flexible`. + input = <<~'EOS' + List + ==== + + - I am *strong*. + - I am _stressed_. + - I am `flexible`. EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]//strong', output, 1 @@ -661,15 +778,15 @@ end test "attribute substitutions" do - input = <<-EOS -List -==== -:foo: bar + input = <<~'EOS' + List + ==== + :foo: bar -- side a {vbar} side b -- Take me to a {foo}. + - side a {vbar} side b + - Take me to a {foo}. EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '(//ul/li)[1]//p[text() = "side a | side b"]', output, 1 @@ -677,12 +794,12 @@ end test "leading dot is treated as text not block title" do - input = <<-EOS -* .first -* .second -* .third + input = <<~'EOS' + * .first + * .second + * .third EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 %w(.first .second .third).each_with_index do |text, index| @@ -691,28 +808,93 @@ end test "word ending sentence on continuing line not treated as a list item" do - input = <<-EOS -A. This is the story about - AsciiDoc. It begins here. -B. And it ends here. + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + A. This is the story about + AsciiDoc. It begins here. + B. And it ends here. EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 2 end + + test 'should discover anchor at start of unordered list item text and register it as a reference' do + input = <<~'EOS' + The highest peak in the Front Range is <<grays-peak>>, which tops <<mount-evans>> by just a few feet. + + * [[mount-evans,Mount Evans]]At 14,271 feet, Mount Evans is the highest summit of the Chicago Peaks in the Front Range of the Rocky Mountains. + * [[grays-peak,Grays Peak]] + Grays Peak rises to 14,278 feet, making it the highest summit in the Front Range of the Rocky Mountains. + * Longs Peak is a 14,259-foot high, prominent mountain summit in the northern Front Range of the Rocky Mountains. + * Pikes Peak is the highest summit of the southern Front Range of the Rocky Mountains at 14,115 feet. + EOS + + doc = document_from_string input + refs = doc.catalog[:refs] + assert refs.key?('mount-evans') + assert refs.key?('grays-peak') + output = doc.convert standalone: false + assert_xpath '(//p)[1]/a[@href="#grays-peak"][text()="Grays Peak"]', output, 1 + assert_xpath '(//p)[1]/a[@href="#mount-evans"][text()="Mount Evans"]', output, 1 + end + + test 'should discover anchor at start of ordered list item text and register it as a reference' do + input = <<~'EOS' + This is a cross-reference to <<step-2>>. + This is a cross-reference to <<step-4>>. + + . 
Ordered list, item 1, without anchor + . [[step-2,Step 2]]Ordered list, item 2, with anchor + . Ordered list, item 3, without anchor + . [[step-4,Step 4]]Ordered list, item 4, with anchor + EOS + + doc = document_from_string input + refs = doc.catalog[:refs] + assert refs.key?('step-2') + assert refs.key?('step-4') + output = doc.convert standalone: false + assert_xpath '(//p)[1]/a[@href="#step-2"][text()="Step 2"]', output, 1 + assert_xpath '(//p)[1]/a[@href="#step-4"][text()="Step 4"]', output, 1 + end + + test 'should discover anchor at start of callout list item text and register it as a reference' do + input = <<~'EOS' + This is a cross-reference to <<url-mapping>>. + + [source,ruby] + ---- + require 'sinatra' <1> + + get '/hi' do <2> <3> + "Hello World!" + end + ---- + <1> Library import + <2> [[url-mapping,url mapping]]URL mapping + <3> Response block + EOS + + doc = document_from_string input + refs = doc.catalog[:refs] + assert refs.key?('url-mapping') + output = doc.convert standalone: false + assert_xpath '(//p)[1]/a[@href="#url-mapping"][text()="url mapping"]', output, 1 + end end context "Nested lists" do test "asterisk element mixed with dash elements should be nested" do - input = <<-EOS -List -==== - -- Foo -* Boo -- Blech + input = <<~'EOS' + List + ==== + + - Foo + * Boo + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 @@ -720,15 +902,15 @@ end test "dash element mixed with asterisks elements should be nested" do - input = <<-EOS -List -==== - -* Foo -- Boo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + - Boo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 @@ -736,18 +918,18 @@ end test "lines prefixed with alternating list markers separated by blank lines should be nested" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -- Foo + - Foo -* Boo + * Boo -- Blech + - Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 @@ -755,15 +937,15 @@ end test "nested elements (2) with asterisks" do - input = <<-EOS -List -==== - -* Foo -** Boo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + ** Boo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 @@ -771,16 +953,16 @@ end test "nested elements (3) with asterisks" do - input = <<-EOS -List -==== - -* Foo -** Boo -*** Snoo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + ** Boo + *** Snoo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 @@ -788,17 +970,17 @@ end test "nested elements (4) with asterisks" do - input = <<-EOS -List -==== - -* Foo -** Boo -*** Snoo -**** Froo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + ** Boo + *** Snoo + **** Froo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 4 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 @@ -807,18 +989,18 @@ end test "nested elements (5) with asterisks" do - input = <<-EOS -List -==== - -* Foo -** 
Boo -*** Snoo -**** Froo -***** Groo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + ** Boo + *** Snoo + **** Froo + ***** Groo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 5 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 @@ -827,37 +1009,57 @@ assert_xpath '(((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end - test 'nested elements (5) with unicode bullet' do - input = <<-EOS -List -==== - -• Foo -•• Boo -••• Snoo -•••• Froo -••••• Groo -• Blech + test 'nested arbitrary depth with asterisks' do + input = [] + ('a'..'z').each_with_index do |ch, i| + input << %(#{'*' * (i + 1)} #{ch}) + end + output = convert_string_to_embedded input.join(%(\n)) + refute_includes output, '*' + assert_css 'li', output, 26 + end + + test 'level of unordered list should match section level' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Parent Section + + * item 1.1 + ** item 2.1 + *** item 3.1 + ** item 2.2 + * item 1.2 + + === Nested Section + + * item 1.1 EOS - output = render_string input - assert_xpath '//ul', output, 5 - assert_xpath '(//ul)[1]/li', output, 2 - assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 - assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 - assert_xpath '((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 - assert_xpath '(((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 - end if ::RUBY_MIN_VERSION_1_9 + + doc = document_from_string input + lists = doc.find_by context: :ulist + assert_equal 1, lists[0].level + assert_equal 1, lists[1].level + assert_equal 1, lists[2].level + assert_equal 2, lists[3].level + end + + test 'does not recognize lists with repeating unicode bullets' do + input = '•• Boo' + output = convert_string input + assert_xpath '//ul', output, 0 + assert_includes output, '•' + end test "nested ordered elements (2)" do - input = <<-EOS -List -==== - -. Foo -.. Boo -. Blech + input = <<~'EOS' + List + ==== + + . Foo + .. Boo + . Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 2 assert_xpath '//ol/li', output, 3 assert_xpath '(//ol)[1]/li', output, 2 @@ -865,32 +1067,66 @@ end test "nested ordered elements (3)" do - input = <<-EOS -List -==== - -. Foo -.. Boo -... Snoo -. Blech + input = <<~'EOS' + List + ==== + + . Foo + .. Boo + ... Snoo + . Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 3 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '((//ol)[1]/li//ol)[1]/li', output, 1 assert_xpath '(((//ol)[1]/li//ol)[1]/li//ol)[1]/li', output, 1 end + test 'nested arbitrary depth with dot marker' do + input = [] + ('a'..'z').each_with_index do |ch, i| + input << %(#{'.' * (i + 1)} #{ch}) + end + output = convert_string_to_embedded input.join(%(\n)) + refute_includes output, '.' + assert_css 'li', output, 26 + end + + test 'level of ordered list should match section level' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Parent Section + + . item 1.1 + .. item 2.1 + ... item 3.1 + .. item 2.2 + . item 1.2 + + === Nested Section + + . 
item 1.1 + EOS + + doc = document_from_string input + lists = doc.find_by context: :olist + assert_equal 1, lists[0].level + assert_equal 1, lists[1].level + assert_equal 1, lists[2].level + assert_equal 2, lists[3].level + end + test "nested unordered inside ordered elements" do - input = <<-EOS -List -==== - -. Foo -* Boo -. Blech + input = <<~'EOS' + List + ==== + + . Foo + * Boo + . Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ul', output, 1 assert_xpath '(//ol)[1]/li', output, 2 @@ -898,15 +1134,15 @@ end test "nested ordered inside unordered elements" do - input = <<-EOS -List -==== - -* Foo -. Boo -* Blech + input = <<~'EOS' + List + ==== + + * Foo + . Boo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 @@ -914,16 +1150,16 @@ end test 'three levels of alternating unordered and ordered elements' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -* bullet 1 -. numbered 1.1 -** bullet 1.1.1 -* bullet 2 + * bullet 1 + . numbered 1.1 + ** bullet 1.1.1 + * bullet 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.ulist', output, 2 assert_css '.olist', output, 1 assert_css '.ulist > ul > li > p', output, 3 @@ -935,18 +1171,18 @@ end test "lines with alternating markers of unordered and ordered list types separated by blank lines should be nested" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -* Foo + * Foo -. Boo + . Boo -* Blech + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 @@ -954,19 +1190,20 @@ end test 'list item with literal content should not consume nested list of different type' do - input = <<-EOS -List -==== + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + List + ==== -- bullet + - bullet - literal - but not - hungry + literal + but not + hungry -. numbered + . numbered EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//li', output, 2 assert_xpath '//ul//ol', output, 1 @@ -979,38 +1216,39 @@ end test 'nested list item does not eat the title of the following detached block' do - input = <<-EOS -List -==== - -- bullet - * nested bullet 1 - * nested bullet 2 - -.Title -.... -literal -.... + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + List + ==== + + - bullet + * nested bullet 1 + * nested bullet 2 + + .Title + .... + literal + .... 
EOS - # use render_string so we can match all ulists easier - output = render_string input + # use convert_string so we can match all ulists easier + output = convert_string input assert_xpath '//*[@class="ulist"]/ul', output, 2 assert_xpath '(//*[@class="ulist"])[1]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//*[@class="ulist"])[1]/following-sibling::*[@class="literalblock"]/*[@class="title"]', output, 1 end - test "lines with alternating markers of bulleted and labeled list types separated by blank lines should be nested" do - input = <<-EOS -List -==== + test "lines with alternating markers of bulleted and description list types separated by blank lines should be nested" do + input = <<~'EOS' + List + ==== -* Foo + * Foo -term1:: def1 + term1:: def1 -* Blech + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//dl', output, 1 assert_xpath '//ul[1]/li', output, 2 @@ -1019,16 +1257,16 @@ end test "nested ordered with attribute inside unordered elements" do - input = <<-EOS -Blah -==== - -* Foo -[start=2] -. Boo -* Blech + input = <<~'EOS' + Blah + ==== + + * Foo + [start=2] + . Boo + * Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 @@ -1038,17 +1276,17 @@ context "List continuations" do test "adjacent list continuation line attaches following paragraph" do - input = <<-EOS -Lists -===== - -* Item one, paragraph one -+ -Item one, paragraph two -+ -* Item two + input = <<~'EOS' + Lists + ===== + + * Item one, paragraph one + + + Item one, paragraph two + + + * Item two EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 @@ -1058,19 +1296,19 @@ end test "adjacent list continuation line attaches following block" do - input = <<-EOS -Lists -===== - -* Item one, paragraph one -+ -.... -Item one, literal block -.... -+ -* Item two + input = <<~'EOS' + Lists + ===== + + * Item one, paragraph one + + + .... + Item one, literal block + .... + + + * Item two EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 @@ -1078,23 +1316,23 @@ end test 'adjacent list continuation line attaches following block with block attributes' do - input = <<-EOS -Lists -===== - -* Item one, paragraph one -+ -:foo: bar -[[beck]] -.Read the following aloud to yourself -[source, ruby] ----- -5.times { print "Odelay!" } ----- + input = <<~'EOS' + Lists + ===== + + * Item one, paragraph one + + + :foo: bar + [[beck]] + .Read the following aloud to yourself + [source, ruby] + ---- + 5.times { print "Odelay!" 
} + ---- -* Item two + * Item two EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 @@ -1104,17 +1342,17 @@ end test 'trailing block attribute line attached by continuation should not create block' do - input = <<-EOS -Lists -===== + input = <<~'EOS' + Lists + ===== -* Item one, paragraph one -+ -[source] + * Item one, paragraph one + + + [source] -* Item two + * Item two EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/*', output, 1 @@ -1122,40 +1360,40 @@ end test 'trailing block title line attached by continuation should not create block' do - input = <<-EOS -Lists -===== + input = <<~'EOS' + Lists + ===== -* Item one, paragraph one -+ -.Disappears into the ether + * Item one, paragraph one + + + .Disappears into the ether -* Item two + * Item two EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/*', output, 1 end test 'consecutive blocks in list continuation attach to list item' do - input = <<-EOS -Lists -===== - -* Item one, paragraph one -+ -.... -Item one, literal block -.... -+ -____ -Item one, quote block -____ -+ -* Item two + input = <<~'EOS' + Lists + ===== + + * Item one, paragraph one + + + .... + Item one, literal block + .... + + + ____ + Item one, quote block + ____ + + + * Item two EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 @@ -1164,19 +1402,20 @@ end test 'list item with hanging indent followed by block attached by list continuation' do - input = <<-EOS -== Lists - -. list item 1 - continued -+ --- -open block in list item 1 --- + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists + + . list item 1 + continued + + + -- + open block in list item 1 + -- -. list item 2 + . list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ol', output, 1 assert_css 'ol li', output, 2 assert_xpath %((//ol/li)[1]/p[text()="list item 1\ncontinued"]), output, 1 @@ -1186,20 +1425,20 @@ end test 'list item paragraph in list item and nested list item' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -. list item 1 -+ -list item 1 paragraph + . list item 1 + + + list item 1 paragraph -* nested list item -+ -nested list item paragraph + * nested list item + + + nested list item paragraph -. list item 2 + . list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 @@ -1215,22 +1454,22 @@ end test 'trailing list continuations should attach to list items at respective levels' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -. list item 1 -+ -* nested list item 1 -* nested list item 2 -+ -paragraph for nested list item 2 + . list item 1 + + + * nested list item 1 + * nested list item 2 + + + paragraph for nested list item 2 -+ -paragraph for list item 1 + + + paragraph for list item 1 -. list item 2 + . 
list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 @@ -1248,22 +1487,22 @@ end test 'trailing list continuations should attach to list items of different types at respective levels' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -* bullet 1 -. numbered 1.1 -** bullet 1.1.1 + * bullet 1 + . numbered 1.1 + ** bullet 1.1.1 -+ -numbered 1.1 paragraph + + + numbered 1.1 paragraph -+ -bullet 1 paragraph + + + bullet 1 paragraph -* bullet 2 + * bullet 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '(//ul)[1]/li', output, 2 @@ -1284,27 +1523,27 @@ end test 'repeated list continuations should attach to list items at respective levels' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -. list item 1 + . list item 1 -* nested list item 1 -+ --- -open block for nested list item 1 --- -+ -* nested list item 2 -+ -paragraph for nested list item 2 + * nested list item 1 + + + -- + open block for nested list item 1 + -- + + + * nested list item 2 + + + paragraph for nested list item 2 -+ -paragraph for list item 1 + + + paragraph for list item 1 -. list item 2 + . list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 @@ -1325,27 +1564,27 @@ end test 'repeated list continuations attached directly to list item should attach to list items at respective levels' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -. list item 1 -+ -* nested list item 1 -+ --- -open block for nested list item 1 --- -+ -* nested list item 2 -+ -paragraph for nested list item 2 + . list item 1 + + + * nested list item 1 + + + -- + open block for nested list item 1 + -- + + + * nested list item 2 + + + paragraph for nested list item 2 -+ -paragraph for list item 1 + + + paragraph for list item 1 -. list item 2 + . list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 @@ -1366,28 +1605,28 @@ end test 'repeated list continuations should attach to list items at respective levels ignoring blank lines' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -. list item 1 -+ -* nested list item 1 -+ --- -open block for nested list item 1 --- -+ -* nested list item 2 -+ -paragraph for nested list item 2 + . list item 1 + + + * nested list item 1 + + + -- + open block for nested list item 1 + -- + + + * nested list item 2 + + + paragraph for nested list item 2 -+ -paragraph for list item 1 + + + paragraph for list item 1 -. list item 2 + . 
list item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 @@ -1408,28 +1647,28 @@ end test 'trailing list continuations should ignore preceding blank lines' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -* bullet 1 -** bullet 1.1 -*** bullet 1.1.1 -+ --- -open block --- + * bullet 1 + ** bullet 1.1 + *** bullet 1.1.1 + + + -- + open block + -- -+ -bullet 1.1 paragraph + + + bullet 1.1 paragraph -+ -bullet 1 paragraph + + + bullet 1 paragraph -* bullet 2 + * bullet 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '((//ul)[1]/li[1])/*', output, 3 assert_xpath '(((//ul)[1]/li[1])/*)[1]/self::p[text()="bullet 1"]', output, 1 @@ -1449,21 +1688,22 @@ end test 'indented outline list item with different marker offset by a blank line should be recognized as a nested list' do - input = <<-EOS -* item 1 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * item 1 - . item 1.1 -+ -attached paragraph + . item 1.1 + + + attached paragraph - . item 1.2 -+ -attached paragraph + . item 1.2 + + + attached paragraph -* item 2 + * item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ol', output, 1 @@ -1480,22 +1720,23 @@ end end - test 'indented labeled list item inside outline list item offset by a blank line should be recognized as a nested list' do - input = <<-EOS -* item 1 + test 'indented description list item inside outline list item offset by a blank line should be recognized as a nested list' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * item 1 - term a:: description a -+ -attached paragraph + term a:: description a + + + attached paragraph - term b:: description b -+ -attached paragraph + term b:: description b + + + attached paragraph -* item 2 + * item 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'dl', output, 1 @@ -1516,21 +1757,21 @@ # NOTE this is not consistent w/ AsciiDoc output, but this is some screwy input anyway =begin test "consecutive list continuation lines are folded" do - input = <<-EOS -Lists -===== - -* Item one, paragraph one -+ -+ -Item one, paragraph two -+ -+ -* Item two -+ -+ + input = <<~'EOS' + Lists + ===== + + * Item one, paragraph one + + + + + Item one, paragraph two + + + + + * Item two + + + + EOS - output = render_string input + output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 @@ -1540,178 +1781,242 @@ end =end + test 'should warn if unterminated block is detected in list item' do + input = <<~'EOS' + * item + + + ==== + example + * swallowed item + EOS + + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//ul/li', output, 1 + assert_xpath '//ul/li/*[@class="exampleblock"]', output, 1 + assert_xpath %(//p[text()="example\n* swallowed item"]), output, 1 + assert_message logger, :WARN, ': line 3: unterminated example block', Hash + end + end end end context "Ordered lists (:olist)" do context "Simple lists" do test "dot elements with no blank lines" do - input = <<-EOS -List -==== - -. Foo -. Boo -. 
Blech + input = <<~'EOS' + List + ==== + + . Foo + . Boo + . Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'indented dot elements using spaces' do - input = <<-EOS - . Foo - . Boo - . Blech + input = <<~EOS + \x20. Foo + \x20. Boo + \x20. Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'indented dot elements using tabs' do - input = <<-EOS -\t.\tFoo -\t.\tBoo -\t.\tBlech + input = <<~EOS + \t.\tFoo + \t.\tBoo + \t.\tBlech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'should represent explicit role attribute as style class' do - input = <<-EOS -[role="dry"] -. Once -. Again -. Refactor! + input = <<~'EOS' + [role="dry"] + . Once + . Again + . Refactor! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist.arabic.dry', output, 1 assert_css '.olist ol.arabic', output, 1 end + test 'should base list style on marker length rather than list depth' do + input = <<~'EOS' + ... parent + .. child + . grandchild + EOS + + output = convert_string_to_embedded input + assert_css '.olist.lowerroman', output, 1 + assert_css '.olist.lowerroman .olist.loweralpha', output, 1 + assert_css '.olist.lowerroman .olist.loweralpha .olist.arabic', output, 1 + end + + test 'should allow list style to be specified explicitly when using markers with implicit style' do + input = <<~'EOS' + [loweralpha] + i) 1 + ii) 2 + iii) 3 + EOS + + output = convert_string_to_embedded input + assert_css '.olist.loweralpha', output, 1 + assert_css '.olist.lowerroman', output, 0 + end + test 'should represent custom numbering and explicit role attribute as style classes' do - input = <<-EOS -[loweralpha, role="dry"] -. Once -. Again -. Refactor! + input = <<~'EOS' + [loweralpha, role="dry"] + . Once + . Again + . Refactor! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist.loweralpha.dry', output, 1 assert_css '.olist ol.loweralpha', output, 1 end test 'should set reversed attribute on list if reversed option is set' do - input = <<-EOS -[%reversed, start=3] -. three -. two -. one -. blast off! + input = <<~'EOS' + [%reversed, start=3] + . three + . two + . one + . blast off! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'ol[reversed][start="3"]', output, 1 end test 'should represent implicit role attribute as style class' do - input = <<-EOS -[.dry] -. Once -. Again -. Refactor! + input = <<~'EOS' + [.dry] + . Once + . Again + . Refactor! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist.arabic.dry', output, 1 assert_css '.olist ol.arabic', output, 1 end test 'should represent custom numbering and implicit role attribute as style classes' do - input = <<-EOS -[loweralpha.dry] -. Once -. Again -. Refactor! + input = <<~'EOS' + [loweralpha.dry] + . Once + . Again + . Refactor! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.olist.loweralpha.dry', output, 1 assert_css '.olist ol.loweralpha', output, 1 end test "dot elements separated by blank lines should merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -. Foo + . Foo -. Boo + . Boo -. Blech + . 
Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end + test 'should escape special characters in all literal paragraphs attached to list item' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + . first item + + text + + more text + + . second item + EOS + + output = convert_string_to_embedded input + assert_css 'li', output, 2 + assert_css 'code', output, 0 + assert_css 'li:first-of-type > *', output, 3 + assert_css 'li:first-of-type pre', output, 2 + assert_xpath '((//li)[1]//pre)[1][text()="text"]', output, 1 + assert_xpath '((//li)[1]//pre)[2][text()="more text"]', output, 1 + end + test 'dot elements with interspersed line comments should be skipped and not break list' do - input = <<-EOS -== List + input = <<~'EOS' + == List -. Foo -// line comment -// another line comment -. Boo -// line comment -more text -// another line comment -. Blech + . Foo + // line comment + // another line comment + . Boo + // line comment + more text + // another line comment + . Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 assert_xpath %((//ol/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "dot elements separated by line comment offset by blank lines should not merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -. Foo -. Boo + . Foo + . Boo -// + // -. Blech + . Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 end test "dot elements separated by a block title offset by a blank line should not merge lists" do - input = <<-EOS -List -==== + input = <<~'EOS' + List + ==== -. Foo -. Boo + . Foo + . Boo -.Also -. Blech + .Also + . Blech EOS - output = render_string input + output = convert_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 @@ -1719,61 +2024,116 @@ end test "dot elements separated by an attribute entry offset by a blank line should not merge lists" do - input = <<-EOS -== List + input = <<~'EOS' + == List -. Foo -. Boo + . Foo + . Boo -:foo: bar -. Blech + :foo: bar + . Blech EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 end - test 'should use start number in docbook4.5 backend' do - input = <<-EOS -== List + test 'should use start number in docbook5 backend' do + input = <<~'EOS' + == List -[start=7] -. item 7 -. item 8 + [start=7] + . item 7 + . item 8 EOS - output = render_embedded_string input, :backend => 'docbook45' + output = convert_string_to_embedded input, backend: 'docbook5' assert_xpath '//orderedlist', output, 1 assert_xpath '(//orderedlist)/listitem', output, 2 - assert_xpath '(//orderedlist/listitem)[1][@override = "7"]', output, 1 + assert_xpath '(//orderedlist)[@startingnumber = "7"]', output, 1 end - test 'should use start number in docbook5 backend' do - input = <<-EOS -== List + test 'should match trailing line separator in text of list item' do + input = <<~EOS.chop + . a + . b#{decode_char 8232} + . 
c + EOS + + output = convert_string input + assert_css 'li', output, 3 + assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}"]), output, 1 + end -[start=7] -. item 7 -. item 8 + test 'should match line separator in text of list item' do + input = <<~EOS.chop + . a + . b#{decode_char 8232}b + . c EOS - output = render_embedded_string input, :backend => 'docbook5' - assert_xpath '//orderedlist', output, 1 - assert_xpath '(//orderedlist)/listitem', output, 2 - assert_xpath '(//orderedlist)[@startingnumber = "7"]', output, 1 + output = convert_string input + assert_css 'li', output, 3 + assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 + end + end + + test 'should warn if explicit uppercase roman numerals in list are out of sequence' do + input = <<~'EOS' + I) one + III) three + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//ol/li', output, 2 + assert_message logger, :WARN, ': line 2: list item index: expected II, got III', Hash + end + end + + test 'should warn if explicit lowercase roman numerals in list are out of sequence' do + input = <<~'EOS' + i) one + iii) three + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//ol/li', output, 2 + assert_message logger, :WARN, ': line 2: list item index: expected ii, got iii', Hash end end end context "Description lists (:dlist)" do context "Simple lists" do + test 'should not parse a bare dlist delimiter as a dlist' do + input = '::' + output = convert_string_to_embedded input + assert_css 'dl', output, 0 + assert_xpath '//p[text()="::"]', output, 1 + end + + test 'should not parse an indented bare dlist delimiter as a dlist' do + input = ' ::' + output = convert_string_to_embedded input + assert_css 'dl', output, 0 + assert_xpath '//pre[text()="::"]', output, 1 + end + + test 'should parse a dlist delimiter preceded by a blank attribute as a dlist' do + input = '{blank}::' + output = convert_string_to_embedded input + assert_css 'dl', output, 1 + assert_css 'dl > dt', output, 1 + assert_css 'dl > dt:empty', output, 1 + end + test "single-line adjacent elements" do - input = <<-EOS -term1:: def1 -term2:: def2 + input = <<~'EOS' + term1:: def1 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -1783,12 +2143,39 @@ assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end + test 'should parse sibling items using same rules' do + input = <<~'EOS' + term1;; ;; def1 + term2;; ;; def2 + EOS + output = convert_string input + assert_xpath '//dl', output, 1 + assert_xpath '//dl/dt', output, 2 + assert_xpath '//dl/dt/following-sibling::dd', output, 2 + assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 + assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = ";; def1"]', output, 1 + assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 + assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = ";; def2"]', output, 1 + end + + test 'should allow term to end with a semicolon when using double semicolon delimiter' do + input = <<~'EOS' + term;;; def + EOS + output = convert_string_to_embedded input + assert_css 'dl', output, 1 + assert_css 'dl > dt', output, 1 + assert_xpath '(//dl/dt)[1][text() = "term;"]', output, 1 + assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def"]', output, 1 + end + 
test "single-line indented adjacent elements" do - input = <<-EOS -term1:: def1 - term2:: def2 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: def1 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -1799,11 +2186,11 @@ end test "single-line indented adjacent elements with tabs" do - input = <<-EOS -term1::\tdef1 -\tterm2::\tdef2 + input = <<~EOS + term1::\tdef1 + \tterm2::\tdef2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -1814,26 +2201,26 @@ end test "single-line elements separated by blank line should create a single list" do - input = <<-EOS -term1:: def1 + input = <<~'EOS' + term1:: def1 -term2:: def2 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 end test "a line comment between elements should divide them into separate lists" do - input = <<-EOS -term1:: def1 + input = <<~'EOS' + term1:: def1 -// + // -term2:: def2 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl/dt', output, 2 assert_xpath '(//dl)[1]/dt', output, 1 @@ -1841,14 +2228,14 @@ end test "a ruler between elements should divide them into separate lists" do - input = <<-EOS -term1:: def1 + input = <<~'EOS' + term1:: def1 -''' + ''' -term2:: def2 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl//hr', output, 0 @@ -1857,13 +2244,13 @@ end test "a block title between elements should divide them into separate lists" do - input = <<-EOS -term1:: def1 + input = <<~'EOS' + term1:: def1 -.Some more -term2:: def2 + .Some more + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl/dt', output, 2 assert_xpath '(//dl)[1]/dt', output, 1 @@ -1872,13 +2259,13 @@ end test "multi-line elements with paragraph content" do - input = <<-EOS -term1:: -def1 -term2:: -def2 + input = <<~'EOS' + term1:: + def1 + term2:: + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -1889,13 +2276,14 @@ end test "multi-line elements with indented paragraph content" do - input = <<-EOS -term1:: - def1 -term2:: - def2 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: + def1 + term2:: + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -1906,16 +2294,17 @@ end test "multi-line elements with indented paragraph content that includes comment lines" do - input = <<-EOS -term1:: - def1 -// comment -term2:: - def2 -// comment - def2 continued + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: + def1 + // comment + term2:: + def2 + // comment + 
def2 continued EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -1926,44 +2315,70 @@ end test "should not strip comment line in literal paragraph block attached to list item" do - input = <<-EOS -term1:: -+ - line 1 -// not a comment - line 3 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: + + + line 1 + // not a comment + line 3 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="literalblock"]', output, 1 assert_xpath %(//*[@class="literalblock"]//pre[text()=" line 1\n// not a comment\n line 3"]), output, 1 end + test 'should escape special characters in all literal paragraphs attached to list item' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term:: desc + + text + + more text + + another term:: + + text in a paragraph + EOS + + output = convert_string_to_embedded input + assert_css 'dt', output, 2 + assert_css 'code', output, 0 + assert_css 'dd:first-of-type > *', output, 3 + assert_css 'dd:first-of-type pre', output, 2 + assert_xpath '((//dd)[1]//pre)[1][text()="text"]', output, 1 + assert_xpath '((//dd)[1]//pre)[2][text()="more text"]', output, 1 + assert_xpath '((//dd)[2]//p)[1][text()="text in a paragraph"]', output, 1 + end + test 'multi-line element with paragraph starting with multiple dashes should not be seen as list' do - input = <<-EOS -term1:: - def1 - -- and a note + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: + def1 + -- and a note -term2:: - def2 + term2:: + def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 - assert_xpath %((//dl/dt)[1]/following-sibling::dd/p[text() = "def1#{entity 8201}#{entity 8212}#{entity 8201}and a note"]), output, 1 + assert_xpath %((//dl/dt)[1]/following-sibling::dd/p[text() = "def1#{decode_char 8201}#{decode_char 8212}#{decode_char 8201}and a note"]), output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line element with multiple terms" do - input = <<-EOS -term1:: -term2:: -def2 + input = <<~'EOS' + term1:: + term2:: + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 1 @@ -1974,14 +2389,14 @@ end test 'consecutive terms share same varlistentry in docbook' do - input = <<-EOS -term:: -alt term:: -description + input = <<~'EOS' + term:: + alt term:: + description -last:: + last:: EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//varlistentry', output, 2 assert_xpath '(//varlistentry)[1]/term', output, 2 assert_xpath '(//varlistentry)[2]/term', output, 1 @@ -1990,15 +2405,15 @@ end test "multi-line elements with blank line before paragraph content" do - input = <<-EOS -term1:: + input = <<~'EOS' + term1:: -def1 -term2:: + def1 + 
term2:: -def2 + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -2010,16 +2425,17 @@ test "multi-line elements with paragraph and literal content" do # blank line following literal paragraph is required or else it will gobble up the second term - input = <<-EOS -term1:: -def1 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: + def1 - literal + literal -term2:: - def2 + term2:: + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -2031,12 +2447,12 @@ end test "mixed single and multi-line adjacent elements" do - input = <<-EOS -term1:: def1 -term2:: -def2 + input = <<~'EOS' + term1:: def1 + term2:: + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 @@ -2046,39 +2462,47 @@ assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end - test "element with anchor" do - input = <<-EOS -[[term1]]term1:: def1 -[[term2]]term2:: def2 - EOS - output = render_string input + test 'should discover anchor at start of description term text and register it as a reference' do + input = <<~'EOS' + The highest peak in the Front Range is <<grays-peak>>, which tops <<mount-evans>> by just a few feet. + + [[mount-evans,Mount Evans]]Mount Evans:: 14,271 feet + [[grays-peak]]Grays Peak:: 14,278 feet + EOS + doc = document_from_string input + refs = doc.catalog[:refs] + assert refs.key?('mount-evans') + assert refs.key?('grays-peak') + output = doc.convert standalone: false + assert_xpath '(//p)[1]/a[@href="#grays-peak"][text()="Grays Peak"]', output, 1 + assert_xpath '(//p)[1]/a[@href="#mount-evans"][text()="Mount Evans"]', output, 1 assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 - assert_xpath '(//dl/dt)[1]/a[@id = "term1"]', output, 1 - assert_xpath '(//dl/dt)[2]/a[@id = "term2"]', output, 1 + assert_xpath '(//dl/dt)[1]/a[@id="mount-evans"]', output, 1 + assert_xpath '(//dl/dt)[2]/a[@id="grays-peak"]', output, 1 end - test "missing space before term does not produce labeled list" do - input = <<-EOS -term1::def1 -term2::def2 + test "missing space before term does not produce description list" do + input = <<~'EOS' + term1::def1 + term2::def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 0 end - test "literal block inside labeled list" do - input = <<-EOS -term:: -+ -.... -literal, line 1 - -literal, line 2 -.... -anotherterm:: def + test "literal block inside description list" do + input = <<~'EOS' + term:: + + + .... + literal, line 1 + + literal, line 2 + .... + anotherterm:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 1 @@ -2086,19 +2510,19 @@ assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end - test "literal block inside labeled list with trailing line continuation" do - input = <<-EOS -term:: -+ -.... -literal, line 1 - -literal, line 2 -.... -+ -anotherterm:: def + test "literal block inside description list with trailing line continuation" do + input = <<~'EOS' + term:: + + + .... 
+ literal, line 1 + + literal, line 2 + .... + + + anotherterm:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 1 @@ -2106,24 +2530,24 @@ assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end - test "multiple listing blocks inside labeled list" do - input = <<-EOS -term:: -+ ----- -listing, line 1 - -listing, line 2 ----- -+ ----- -listing, line 1 - -listing, line 2 ----- -anotherterm:: def + test "multiple listing blocks inside description list" do + input = <<~'EOS' + term:: + + + ---- + listing, line 1 + + listing, line 2 + ---- + + + ---- + listing, line 1 + + listing, line 2 + ---- + anotherterm:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 2 @@ -2131,31 +2555,31 @@ assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end - test "open block inside labeled list" do - input = <<-EOS -term:: -+ --- -Open block as description of term. - -And some more detail... --- -anotherterm:: def + test "open block inside description list" do + input = <<~'EOS' + term:: + + + -- + Open block as description of term. + + And some more detail... + -- + anotherterm:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dd//p', output, 3 assert_xpath '(//dl/dd)[1]//*[@class="openblock"]//p', output, 2 end - test "paragraph attached by a list continuation on either side in a labeled list" do - input = <<-EOS -term1:: def1 -+ -more detail -+ -term2:: def2 + test "paragraph attached by a list continuation on either side in a description list" do + input = <<~'EOS' + term1:: def1 + + + more detail + + + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '(//dl/dt)[1][normalize-space(text())="term1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text())="term2"]', output, 1 assert_xpath '(//dl/dd)[1]//p', output, 2 @@ -2163,16 +2587,16 @@ assert_xpath '(//dl/dd)[1]/p/following-sibling::*[@class="paragraph"]/p[text() = "more detail"]', output, 1 end - test "paragraph attached by a list continuation on either side to a multi-line element in a labeled list" do - input = <<-EOS -term1:: -def1 -+ -more detail -+ -term2:: def2 + test "paragraph attached by a list continuation on either side to a multi-line element in a description list" do + input = <<~'EOS' + term1:: + def1 + + + more detail + + + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '(//dl/dt)[1][normalize-space(text())="term1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text())="term2"]', output, 1 assert_xpath '(//dl/dd)[1]//p', output, 2 @@ -2180,46 +2604,65 @@ assert_xpath '(//dl/dd)[1]/p/following-sibling::*[@class="paragraph"]/p[text() = "more detail"]', output, 1 end - test "verse paragraph inside a labeled list" do - input = <<-EOS -term1:: def -+ -[verse] -la la la + test 'should continue to parse subsequent blocks attached to list item after first block is dropped' do + input = <<~'EOS' + :attribute-missing: drop-line + + term:: + + + image::{unresolved}[] + + + paragraph + EOS + + output = convert_string_to_embedded input + assert_css 'dl', output, 1 + assert_css 'dl > dt', output, 1 + assert_css 'dl > dt + dd', output, 1 + assert_css 'dl > dt + dd > .imageblock', output, 0 + assert_css 'dl > dt + dd > .paragraph', output, 1 + end + + 
test "verse paragraph inside a description list" do + input = <<~'EOS' + term1:: def + + + [verse] + la la la -term2:: def + term2:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dd//p', output, 2 assert_xpath '(//dl/dd)[1]/*[@class="verseblock"]/pre[text() = "la la la"]', output, 1 end - test "list inside a labeled list" do - input = <<-EOS -term1:: -* level 1 -** level 2 -* level 1 -term2:: def + test "list inside a description list" do + input = <<~'EOS' + term1:: + * level 1 + ** level 2 + * level 1 + term2:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd/p', output, 1 assert_xpath '(//dl/dd)[1]//ul', output, 2 assert_xpath '((//dl/dd)[1]//ul)[1]//ul', output, 1 end - test "list inside a labeled list offset by blank lines" do - input = <<-EOS -term1:: + test "list inside a description list offset by blank lines" do + input = <<~'EOS' + term1:: -* level 1 -** level 2 -* level 1 + * level 1 + ** level 2 + * level 1 -term2:: def + term2:: def EOS - output = render_string input + output = convert_string input assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd/p', output, 1 assert_xpath '(//dl/dd)[1]//ul', output, 2 @@ -2227,20 +2670,20 @@ end test "should only grab one line following last item if item has no inline description" do - input = <<-EOS -term1:: + input = <<~'EOS' + term1:: -def1 + def1 -term2:: + term2:: -def2 + def2 -A new paragraph + A new paragraph -Another new paragraph + Another new paragraph EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 @@ -2251,20 +2694,21 @@ end test "should only grab one literal line following last item if item has no inline description" do - input = <<-EOS -term1:: + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: -def1 + def1 -term2:: + term2:: - def2 + def2 -A new paragraph + A new paragraph -Another new paragraph + Another new paragraph EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 @@ -2275,20 +2719,21 @@ end test "should append subsequent paragraph literals to list item as block content" do - input = <<-EOS -term1:: + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: -def1 + def1 -term2:: + term2:: - def2 + def2 - literal + literal -A new paragraph. + A new paragraph. 
EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 @@ -2299,44 +2744,174 @@ assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph."]', output, 1 end - test 'should not match comment line that looks like labeled list term' do - input = <<-EOS -* item + test 'should not match comment line that looks like description list term' do + input = <<~'EOS' + before -//:: -== Section + //key:: val -section text + after EOS - output = render_embedded_string input + output = convert_string_to_embedded input + assert_css 'dl', output, 0 + end + + test 'should not match comment line following list that looks like description list term' do + input = <<~'EOS' + * item + + //term:: desc + == Section + + section text + EOS + + output = convert_string_to_embedded input assert_xpath '/*[@class="ulist"]', output, 1 assert_xpath '/*[@class="sect1"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[text()="Section"]', output, 1 assert_xpath '/*[@class="ulist"]/following-sibling::*[@class="sect1"]', output, 1 end + test 'should not match comment line that looks like sibling description list term' do + input = <<~'EOS' + before + + foo:: bar + //yin:: yang + + after + EOS + + output = convert_string_to_embedded input + assert_css '.dlist', output, 1 + assert_css '.dlist dt', output, 1 + refute_includes output, 'yin' + end + + test 'should not hang on description list item in list that begins with ///' do + input = <<~'EOS' + * a + ///b:: + c + EOS + + output = convert_string_to_embedded input + assert_css 'ul', output, 1 + assert_css 'ul li dl', output, 1 + assert_xpath '//ul/li/p[text()="a"]', output, 1 + assert_xpath '//dt[text()="///b"]', output, 1 + assert_xpath '//dd/p[text()="c"]', output, 1 + end + + test 'should not hang on sibling description list item that begins with ///' do + input = <<~'EOS' + a:: + ///b:: + c + EOS + + output = convert_string_to_embedded input + assert_css 'dl', output, 1 + assert_xpath '(//dl/dt)[1][text()="a"]', output, 1 + assert_xpath '(//dl/dt)[2][text()="///b"]', output, 1 + assert_xpath '//dl/dd/p[text()="c"]', output, 1 + end + + test 'should skip dlist term that begins with // unless it begins with ///' do + input = <<~'EOS' + category a:: + //ignored term:: def + + category b:: + ///term:: def + EOS + + output = convert_string_to_embedded input + refute_includes output, 'ignored term' + assert_xpath '//dt[text()="///term"]', output, 1 + end + test 'more than 4 consecutive colons should become part of description list term' do - input = <<-EOS -A term::::: a description + input = <<~'EOS' + A term::::: a description EOS - output = render_embedded_string input - assert_xpath '//dl', output, 1 - assert_xpath '//dt', output, 1 - assert_xpath '//dt[text()="A term:"]', output, 1 - assert_xpath '//dd/p[text()="a description"]', output, 1 + output = convert_string_to_embedded input + assert_css 'dl', output, 1 + assert_css 'dl > dt', output, 1 + assert_xpath '//dl/dt[text()="A term:"]', output, 1 + assert_xpath '//dl/dd/p[text()="a description"]', output, 1 + end + + test 'text method of dd node should return nil if dd node only contains blocks' do + input = <<~'EOS' + term:: + + + paragraph + EOS + + doc = document_from_string input + dd = doc.blocks[0].items[0][1] + assert_nil dd.text + end + + test 'should match trailing line separator in text of list item' do + input = <<~EOS.chop + A:: a + 
B:: b#{decode_char 8232} + C:: c + EOS + + output = convert_string input + assert_css 'dd', output, 3 + assert_xpath %((//dd)[2]/p[text()="b#{decode_char 8232}"]), output, 1 + end + + test 'should match line separator in text of list item' do + input = <<~EOS.chop + A:: a + B:: b#{decode_char 8232}b + C:: c + EOS + + output = convert_string input + assert_css 'dd', output, 3 + assert_xpath %((//dd)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 end end context "Nested lists" do + test 'should not parse a nested dlist delimiter without a term as a dlist' do + input = <<~'EOS' + t:: + ;; + EOS + output = convert_string_to_embedded input + assert_xpath '//dl', output, 1 + assert_xpath '//dl/dd/p[text()=";;"]', output, 1 + end + + test 'should not parse a nested indented dlist delimiter without a term as a dlist' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + t:: + desc + ;; + EOS + output = convert_string_to_embedded input + assert_xpath '//dl', output, 1 + assert_xpath %(//dl/dd/p[text()="desc\n ;;"]), output, 1 + end + test "single-line adjacent nested elements" do - input = <<-EOS -term1:: def1 -label1::: detail1 -term2:: def2 + input = <<~'EOS' + term1:: def1 + label1::: detail1 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2348,27 +2923,27 @@ end test "single-line adjacent maximum nested elements" do - input = <<-EOS -term1:: def1 -label1::: detail1 -name1:::: value1 -item1;; price1 -term2:: def2 + input = <<~'EOS' + term1:: def1 + label1::: detail1 + name1:::: value1 + item1;; price1 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 4 assert_xpath '//dl//dl//dl//dl', output, 1 end test "single-line nested elements seperated by blank line at top level" do - input = <<-EOS -term1:: def1 + input = <<~'EOS' + term1:: def1 -label1::: detail1 + label1::: detail1 -term2:: def2 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2380,14 +2955,14 @@ end test "single-line nested elements seperated by blank line at nested level" do - input = <<-EOS -term1:: def1 -label1::: detail1 + input = <<~'EOS' + term1:: def1 + label1::: detail1 -label2::: detail2 -term2:: def2 + label2::: detail2 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2399,12 +2974,12 @@ end test "single-line adjacent nested elements with alternate delimiters" do - input = <<-EOS -term1:: def1 -label1;; detail1 -term2:: def2 + input = <<~'EOS' + term1:: def1 + label1;; detail1 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2416,15 +2991,15 @@ end test "multi-line adjacent nested elements" do - input = <<-EOS -term1:: -def1 -label1::: -detail1 -term2:: -def2 + input = <<~'EOS' + term1:: + def1 + label1::: + detail1 + term2:: + def2 EOS - output = render_string input + output = convert_string 
input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2436,18 +3011,18 @@ end test "multi-line nested elements seperated by blank line at nested level repeated" do - input = <<-EOS -term1:: -def1 -label1::: - -detail1 -label2::: -detail2 + input = <<~'EOS' + term1:: + def1 + label1::: + + detail1 + label2::: + detail2 -term2:: def2 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2459,15 +3034,16 @@ end test "multi-line element with indented nested element" do - input = <<-EOS -term1:: - def1 - label1;; - detail1 -term2:: - def2 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: + def1 + label1;; + detail1 + term2:: + def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt', output, 2 @@ -2482,13 +3058,14 @@ end test "mixed single and multi-line elements with indented nested elements" do - input = <<-EOS -term1:: def1 - label1::: - detail1 -term2:: def2 + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + term1:: def1 + label1::: + detail1 + term2:: def2 EOS - output = render_string input + output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2500,13 +3077,13 @@ end test "multi-line elements with first paragraph folded to text with adjacent nested element" do - input = <<-EOS -term1:: def1 -continued -label1::: -detail1 + input = <<~'EOS' + term1:: def1 + continued + label1::: + detail1 EOS - output = render_string input + output = convert_string_to_embedded input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 @@ -2518,27 +3095,27 @@ end context 'Special lists' do - test 'should render glossary list with proper semantics' do - input = <<-EOS -[glossary] -term 1:: def 1 -term 2:: def 2 + test 'should convert glossary list with proper semantics' do + input = <<~'EOS' + [glossary] + term 1:: def 1 + term 2:: def 2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.dlist.glossary', output, 1 assert_css '.dlist dt:not([class])', output, 2 end test 'consecutive glossary terms should share same glossentry element in docbook' do - input = <<-EOS -[glossary] -term:: -alt term:: -description + input = <<~'EOS' + [glossary] + term:: + alt term:: + description -last:: + last:: EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/glossentry', output, 2 assert_xpath '(/glossentry)[1]/glossterm', output, 2 assert_xpath '(/glossentry)[2]/glossterm', output, 1 @@ -2546,41 +3123,44 @@ assert_xpath '(/glossentry)[2]/glossdef[normalize-space(text())=""]', output, 1 end - test 'should render horizontal list with proper markup' do - input = <<-EOS -[horizontal] -first term:: description -+ -more detail + test 'should convert horizontal list with proper markup' do + input = <<~'EOS' + [horizontal] + first term:: description + + + more detail 
-second term:: description + second term:: description EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.hdlist', output, 1 assert_css '.hdlist table', output, 1 assert_css '.hdlist table colgroup', output, 0 assert_css '.hdlist table tr', output, 2 - assert_xpath '/*[@class="hdlist"]/table/tr[1]/td', output, 2 - assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist1"]', output, 1 - assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist2"]', output, 1 - assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist2"]/p', output, 1 - assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist2"]/p/following-sibling::*[@class="paragraph"]', output, 1 + # see nokogiri#1803 for why this is necessary + tbody_path = jruby? ? 'tbody/' : '' + refute_includes output, '' + assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td), output, 2 + assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist1"]), output, 1 + assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist2"]), output, 1 + assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist2"]/p), output, 1 + assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist2"]/p/following-sibling::*[@class="paragraph"]), output, 1 assert_xpath '((//tr)[1]/td)[1][normalize-space(text())="first term"]', output, 1 assert_xpath '((//tr)[1]/td)[2]/p[normalize-space(text())="description"]', output, 1 - assert_xpath '/*[@class="hdlist"]/table/tr[2]/td', output, 2 + assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[2]/td), output, 2 assert_xpath '((//tr)[2]/td)[1][normalize-space(text())="second term"]', output, 1 assert_xpath '((//tr)[2]/td)[2]/p[normalize-space(text())="description"]', output, 1 end test 'should set col widths of item and label if specified' do - input = <<-EOS -[horizontal] -[labelwidth="25", itemwidth="75"] -term:: def + input = <<~'EOS' + [horizontal] + [labelwidth="25", itemwidth="75"] + term:: def EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup', output, 1 assert_css 'table > colgroup > col', output, 2 @@ -2589,13 +3169,13 @@ end test 'should set col widths of item and label in docbook if specified' do - input = <<-EOS -[horizontal] -[labelwidth="25", itemwidth="75"] -term:: def + input = <<~'EOS' + [horizontal] + [labelwidth="25", itemwidth="75"] + term:: def EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable > tgroup', output, 1 assert_css 'informaltable > tgroup > colspec', output, 2 @@ -2604,26 +3184,26 @@ end test 'should add strong class to label if strong option is set' do - input = <<-EOS -[horizontal, options="strong"] -term:: def + input = <<~'EOS' + [horizontal, options="strong"] + term:: def EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.hdlist', output, 1 assert_css '.hdlist td.hdlist1.strong', output, 1 end test 'consecutive terms in horizontal list should share same cell' do - input = <<-EOS -[horizontal] -term:: -alt term:: -description + input = <<~'EOS' + [horizontal] + term:: + alt term:: + description -last:: + last:: EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//tr', output, 2 assert_xpath 
'(//tr)[1]/td[@class="hdlist1"]', output, 1 # NOTE I'm trimming the trailing <br>
    in Asciidoctor @@ -2633,15 +3213,15 @@ end test 'consecutive terms in horizontal list should share same entry in docbook' do - input = <<-EOS -[horizontal] -term:: -alt term:: -description + input = <<~'EOS' + [horizontal] + term:: + alt term:: + description -last:: + last:: EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 2 assert_xpath '((//row)[1]/entry)[1]/simpara', output, 2 @@ -2649,17 +3229,17 @@ assert_xpath '((//row)[2]/entry)[2][normalize-space(text())=""]', output, 1 end - test 'should render horizontal list in docbook with proper markup' do - input = <<-EOS -.Terms -[horizontal] -first term:: description -+ -more detail + test 'should convert horizontal list in docbook with proper markup' do + input = <<~'EOS' + .Terms + [horizontal] + first term:: description + + + more detail -second term:: description + second term:: description EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/table', output, 1 assert_xpath '/table[@tabstyle="horizontal"]', output, 1 assert_xpath '/table[@tabstyle="horizontal"]/title[text()="Terms"]', output, 1 @@ -2669,17 +3249,18 @@ assert_xpath '((/table//row)[1]/entry)[2]/simpara', output, 2 end - test 'should render qanda list in HTML with proper semantics' do - input = <<-EOS -[qanda] -Question 1:: - Answer 1. -Question 2:: - Answer 2. -+ -NOTE: A note about Answer 2. + test 'should convert qanda list in HTML with proper semantics' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [qanda] + Question 1:: + Answer 1. + Question 2:: + Answer 2. + + + NOTE: A note about Answer 2. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.qlist.qanda', output, 1 assert_css '.qanda > ol', output, 1 assert_css '.qanda > ol > li', output, 2 @@ -2693,17 +3274,18 @@ assert_xpath "/*[@class = 'qlist qanda']/ol/li[2]/p[2]/following-sibling::div[@class='admonitionblock note']", output, 1 end - test 'should render qanda list in DocBook with proper semantics' do - input = <<-EOS -[qanda] -Question 1:: - Answer 1. -Question 2:: - Answer 2. -+ -NOTE: A note about Answer 2. + test 'should convert qanda list in DocBook with proper semantics' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [qanda] + Question 1:: + Answer 1. + Question 2:: + Answer 2. + + + NOTE: A note about Answer 2. 
EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_css 'qandaset', output, 1 assert_css 'qandaset > qandaentry', output, 2 (1..2).each do |idx| @@ -2718,15 +3300,15 @@ end test 'consecutive questions should share same question element in docbook' do - input = <<-EOS -[qanda] -question:: -follow-up question:: -response + input = <<~'EOS' + [qanda] + question:: + follow-up question:: + response -last question:: + last question:: EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//qandaentry', output, 2 assert_xpath '(//qandaentry)[1]/question', output, 1 assert_xpath '(//qandaentry)[1]/question/simpara', output, 2 @@ -2735,16 +3317,17 @@ assert_xpath '(//qandaentry)[2]/answer[normalize-space(text())=""]', output, 1 end - test 'should render bibliography list with proper semantics' do - input = <<-EOS -[bibliography] -- [[[taoup]]] Eric Steven Raymond. 'The Art of Unix - Programming'. Addison-Wesley. ISBN 0-13-142901-9. -- [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. - 'DocBook - The Definitive Guide'. O'Reilly & Associates. 1999. - ISBN 1-56592-580-7. + test 'should convert bibliography list with proper semantics' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [bibliography] + - [[[taoup]]] Eric Steven Raymond. 'The Art of Unix + Programming'. Addison-Wesley. ISBN 0-13-142901-9. + - [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. + 'DocBook - The Definitive Guide'. O'Reilly & Associates. 1999. + ISBN 1-56592-580-7. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.ulist.bibliography', output, 1 assert_css '.ulist.bibliography ul', output, 1 assert_css '.ulist.bibliography ul li', output, 2 @@ -2755,16 +3338,17 @@ assert text.text.start_with?('[taoup] ') end - test 'should render bibliography list with proper semantics to DocBook' do - input = <<-EOS -[bibliography] -- [[[taoup]]] Eric Steven Raymond. 'The Art of Unix - Programming'. Addison-Wesley. ISBN 0-13-142901-9. -- [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. - 'DocBook - The Definitive Guide'. O'Reilly & Associates. 1999. - ISBN 1-56592-580-7. + test 'should convert bibliography list with proper semantics to DocBook' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [bibliography] + - [[[taoup]]] Eric Steven Raymond. 'The Art of Unix + Programming'. Addison-Wesley. ISBN 0-13-142901-9. + - [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. + 'DocBook - The Definitive Guide'. O'Reilly & Associates. 1999. + ISBN 1-56592-580-7. EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_css 'bibliodiv', output, 1 assert_css 'bibliodiv > bibliomixed', output, 2 assert_css 'bibliodiv > bibliomixed > bibliomisc', output, 2 @@ -2773,6 +3357,129 @@ assert_css 'bibliodiv > bibliomixed:nth-child(2) > bibliomisc > anchor', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(2) > bibliomisc > anchor[xreflabel="[walsh-muellner]"]', output, 1 end + + test 'should warn if a bibliography ID is already in use' do + input = <<~'EOS' + [bibliography] + * [[[Fowler]]] Fowler M. _Analysis Patterns: Reusable Object Models_. + Addison-Wesley. 1997. + * [[[Fowler]]] Fowler M. 
_Analysis Patterns: Reusable Object Models_. + Addison-Wesley. 1997. + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_css '.ulist.bibliography', output, 1 + assert_css '.ulist.bibliography ul li:nth-child(1) p a#Fowler', output, 1 + assert_css '.ulist.bibliography ul li:nth-child(2) p a#Fowler', output, 1 + assert_message logger, :WARN, ': line 4: id assigned to bibliography anchor already in use: Fowler', Hash + end + end + + test 'should automatically add bibliography style to top-level lists in bibliography section' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [bibliography] + == Bibliography + + .Books + * [[[taoup]]] Eric Steven Raymond. _The Art of Unix + Programming_. Addison-Wesley. ISBN 0-13-142901-9. + * [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. + _DocBook - The Definitive Guide_. O'Reilly & Associates. 1999. + ISBN 1-56592-580-7. + + .Periodicals + * [[[doc-writer]]] Doc Writer. _Documentation As Code_. Static Times, 54. August 2016. + EOS + doc = document_from_string input + ulists = doc.find_by context: :ulist + assert_equal 2, ulists.size + assert_equal ulists[0].style, 'bibliography' + assert_equal ulists[1].style, 'bibliography' + end + + test 'should not recognize bibliography anchor that begins with a digit' do + input = <<~'EOS' + [bibliography] + - [[[1984]]] George Orwell. '1984'. New American Library. 1950. + EOS + + output = convert_string_to_embedded input + assert_includes output, '[[[1984]]]' + assert_xpath '//a[@id="1984"]', output, 0 + end + + test 'should recognize bibliography anchor that contains a digit but does not start with one' do + input = <<~'EOS' + [bibliography] + - [[[_1984]]] George Orwell. '1984'. New American Library. 1950. + EOS + + output = convert_string_to_embedded input + refute_includes output, '[[[_1984]]]' + assert_includes output, '[_1984]' + assert_xpath '//a[@id="_1984"]', output, 1 + end + + test 'should catalog bibliography anchors in bibliography list' do + input = <<~'EOS' + = Article Title + + Please read <>. + + [bibliography] + == References + + * [[[Fowler_1997]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. + EOS + + doc = document_from_string input + assert doc.catalog[:refs].key? 'Fowler_1997' + end + + test 'should use reftext from bibliography anchor at xref and entry' do + input = <<~'EOS' + = Article Title + + Begin with <>. + Then move on to <>. + + [bibliography] + == References + + * [[[TMMM]]] Brooks F. _The Mythical Man-Month_. Addison-Wesley. 1975. + * [[[Fowler_1997,1]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. 
+ EOS + + doc = document_from_string input, standalone: false + tmmm_ref = doc.catalog[:refs]['TMMM'] + refute_nil tmmm_ref + assert_nil tmmm_ref.reftext + fowler_1997_ref = doc.catalog[:refs]['Fowler_1997'] + refute_nil fowler_1997_ref + assert_equal '[1]', fowler_1997_ref.reftext + result = doc.convert standalone: false + assert_xpath '//a[@href="#Fowler_1997"]', result, 1 + assert_xpath '//a[@href="#Fowler_1997"][text()="[1]"]', result, 1 + assert_xpath '//a[@id="Fowler_1997"]', result, 1 + fowler_1997_text = (xmlnodes_at_xpath '(//a[@id="Fowler_1997"])[1]/following-sibling::text()', result, 1).text + assert fowler_1997_text.start_with?('[1] ') + assert_xpath '//a[@href="#TMMM"]', result, 1 + assert_xpath '//a[@href="#TMMM"][text()="[TMMM]"]', result, 1 + assert_xpath '//a[@id="TMMM"]', result, 1 + tmmm_text = (xmlnodes_at_xpath '(//a[@id="TMMM"])[1]/following-sibling::text()', result, 1).text + assert tmmm_text.start_with?('[TMMM] ') + end + + test 'should assign reftext of bibliography anchor to xreflabel in DocBook backend' do + input = <<~'EOS' + [bibliography] + * [[[Fowler_1997,1]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. + EOS + + result = convert_string_to_embedded input, backend: :docbook + assert_includes result, '' + end end end @@ -2781,63 +3488,63 @@ context 'Label without text on same line' do test 'folds text from subsequent line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -def1 + term1:: + def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from first line after blank lines' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -def1 + def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from first line after blank line and immediately preceding next item' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -def1 -term2:: def2 + def1 + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="def1"]', output, 1 end test 'paragraph offset by blank lines does not break list if label does not have inline text' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -def1 + def1 -term2:: def2 + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 2 assert_css 'dl > dd', output, 2 @@ -2845,168 +3552,170 @@ end test 'folds text from first line after comment line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -// comment -def1 + term1:: + // comment + def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from line following comment line offset by blank line' 
do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -// comment -def1 + // comment + def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from subsequent indented line' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: - def1 + term1:: + def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from indented line after blank line' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: + term1:: - def1 + def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text that looks like ruler offset by blank line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -''' + ''' EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="'''"]), output, 1 end test 'folds text that looks like ruler offset by blank line and line comment' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -// comment -''' + // comment + ''' EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="'''"]), output, 1 end test 'folds text that looks like ruler and the line following it offset by blank line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -''' -continued + ''' + continued EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[normalize-space(text())="''' continued"]), output, 1 end test 'folds text that looks like title offset by blank line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -.def1 + .def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()=".def1"]', output, 1 end test 'folds text that looks like title offset by blank line and line comment' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -// comment -.def1 + // comment + .def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath 
'//*[@class="dlist"]//dd/p[text()=".def1"]', output, 1 end test 'folds text that looks like admonition offset by blank line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -NOTE: def1 + NOTE: def1 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="NOTE: def1"]', output, 1 end test 'folds text that looks like section title offset by blank line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -== Another Section + == Another Section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="== Another Section"]', output, 1 @@ -3014,20 +3723,21 @@ end test 'folds text of first literal line offset by blank line appends subsequent literals offset by blank line as blocks' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: + term1:: - def1 + def1 - literal + literal - literal + literal EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3036,18 +3746,19 @@ end test 'folds text of subsequent line and appends following literal line offset by blank line as block if term has no inline description' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: -def1 + term1:: + def1 - literal + literal -term2:: def2 + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="def1"]', output, 1 @@ -3056,15 +3767,16 @@ end test 'appends literal line attached by continuation as block if item has no inline description' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: -+ - literal + term1:: + + + literal EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3073,17 +3785,18 @@ end test 'appends literal line attached by continuation as block if item has no inline description followed by ruler' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: -+ - literal + term1:: + + + literal -''' + ''' EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3093,17 +3806,17 @@ end test 'appends line attached by continuation as block if item has no inline description followed by ruler' do - input = <<-EOS -== 
Lists + input = <<~'EOS' + == Lists -term1:: -+ -para + term1:: + + + para -''' + ''' EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3113,19 +3826,19 @@ end test 'appends line attached by continuation as block if item has no inline description followed by block' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -+ -para + term1:: + + + para -.... -literal -.... + .... + literal + .... EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3136,20 +3849,20 @@ end test 'appends block attached by continuation but not subsequent block not attached by continuation' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -+ -.... -literal -.... -.... -detached -.... + term1:: + + + .... + literal + .... + .... + detached + .... EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3160,17 +3873,17 @@ end test 'appends list if item has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -* one -* two -* three + * one + * two + * three EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3178,18 +3891,18 @@ end test 'appends list to first term when followed immediately by second term' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -* one -* two -* three -term2:: def2 + * one + * two + * three + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p', output, 0 @@ -3198,21 +3911,22 @@ end test 'appends indented list to first term that is adjacent to second term' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists + + label 1:: + description 1 + + * one + * two + * three + label 2:: + description 2 -label 1:: - description 1 - - * one - * two - * three -label 2:: - description 2 - -paragraph + paragraph EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.dlist > dl', output, 1 assert_css '.dlist dt', output, 2 assert_xpath '(//*[@class="dlist"]//dt)[1][normalize-space(text())="label 1"]', output, 1 @@ -3226,21 +3940,22 @@ end test 'appends indented list to first term that is attached by a continuation and adjacent to second term' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists + + label 1:: + description 1 + + + * one + * two + * three + label 2:: + description 2 -label 1:: - description 1 -+ - * one - * two - * three -label 2:: - description 2 - -paragraph + paragraph EOS - output = 
render_embedded_string input + output = convert_string_to_embedded input assert_css '.dlist > dl', output, 1 assert_css '.dlist dt', output, 2 assert_xpath '(//*[@class="dlist"]//dt)[1][normalize-space(text())="label 1"]', output, 1 @@ -3254,20 +3969,20 @@ end test 'appends list and paragraph block when line following list attached by continuation' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -* one -* two -* three + * one + * two + * three -+ -para + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3278,19 +3993,19 @@ end test 'first continued line associated with nested list item and second continued line associated with term' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -* one -+ -nested list para + term1:: + * one + + + nested list para -+ -term1 para + + + term1 para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3302,19 +4017,20 @@ end test 'literal line attached by continuation swallows adjacent line that looks like term' do - input = <<-EOS -== Lists - -term1:: -+ - literal -notnestedterm::: -+ - literal -notnestedterm::: + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists + + term1:: + + + literal + notnestedterm::: + + + literal + notnestedterm::: EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3323,15 +4039,15 @@ end test 'line attached by continuation is appended as paragraph if term has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -+ -para + term1:: + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3339,67 +4055,67 @@ assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end - test 'attached paragraph does not break on adjacent nested labeled list term' do - input = <<-EOS -term1:: def -+ -more description -not a term::: def + test 'attached paragraph does not break on adjacent nested description list term' do + input = <<~'EOS' + term1:: def + + + more description + not a term::: def EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .paragraph', output, 1 - assert output.include?('not a term::: def') + assert_includes output, 'not a term::: def' end # FIXME pending =begin - test 'attached paragraph does not break on adjacent sibling labeled list term' do - input = <<-EOS -term1:: def -+ -more description -not a term:: def + test 'attached paragraph does not break on adjacent sibling description list term' do + input = <<~'EOS' + term1:: def + + + more description + not a term:: def EOS - output = render_embedded_string input + output = 
convert_string_to_embedded input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .paragraph', output, 1 - assert output.include?('not a term:: def') + assert_includes output, 'not a term:: def' end =end - test 'attached styled paragraph does not break on adjacent nested labeled list term' do - input = <<-EOS -term1:: def -+ -[quote] -more description -not a term::: def + test 'attached styled paragraph does not break on adjacent nested description list term' do + input = <<~'EOS' + term1:: def + + + [quote] + more description + not a term::: def EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .quoteblock', output, 1 - assert output.include?('not a term::: def') + assert_includes output, 'not a term::: def' end test 'appends line as paragraph if attached by continuation following blank line and line comment when term has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -// comment -+ -para + // comment + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3408,16 +4124,16 @@ end test 'line attached by continuation offset by blank line is appended as paragraph if term has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: + term1:: -+ -para + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 @@ -3426,16 +4142,16 @@ end test 'delimited block breaks list even when term has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -==== -detached -==== + term1:: + ==== + detached + ==== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="exampleblock"]', output, 1 @@ -3443,15 +4159,15 @@ end test 'attribute line breaks list even when term has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -[verse] -detached + term1:: + [verse] + detached EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="verseblock"]', output, 1 @@ -3459,15 +4175,15 @@ end test 'id line breaks list even when term has no inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: -[[id]] -detached + term1:: + [[id]] + detached EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 @@ -3478,73 +4194,75 @@ context 'Item with text inline' do test 'folds text from inline description and subsequent line' do - input = <<-EOS -== 
Lists + input = <<~'EOS' + == Lists -term1:: def1 -continued + term1:: def1 + continued EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'folds text from inline description and subsequent lines' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 -continued -continued + term1:: def1 + continued + continued EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued\ncontinued"]), output, 1 end test 'folds text from inline description and line following comment line' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 -// comment -continued + term1:: def1 + // comment + continued EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'folds text from inline description and subsequent indented line' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == List -term1:: def1 - continued + term1:: def1 + continued EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'appends literal line offset by blank line as block if item has inline description' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: def1 + term1:: def1 - literal + literal EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3553,17 +4271,18 @@ end test 'appends literal line offset by blank line as block and appends line after continuation as block if item has inline description' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: def1 + term1:: def1 - literal -+ -para + literal + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3574,17 +4293,18 @@ end test 'appends line after continuation as block and literal line offset by blank line as block if item has inline description' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: def1 -+ -para + term1:: def1 + + + para - literal + literal EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', 
output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3595,17 +4315,17 @@ end test 'appends list if item has inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 + term1:: def1 -* one -* two -* three + * one + * two + * three EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="ulist"]', output, 1 @@ -3613,17 +4333,18 @@ end test 'appends literal line attached by continuation as block if item has inline description followed by ruler' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term1:: def1 -+ - literal + term1:: def1 + + + literal -''' + ''' EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3633,15 +4354,15 @@ end test 'line offset by blank line breaks list if term has inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 + term1:: def1 -detached + detached EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3650,19 +4371,20 @@ end test 'nested term with description does not consume following heading' do - input = <<-EOS -== Lists + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + == Lists -term:: - def - nestedterm;; - nesteddef + term:: + def + nestedterm;; + nesteddef -Detached -~~~~~~~~ + Detached + ~~~~~~~~ EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '//*[@class="dlist"]/dl//dl', output, 1 @@ -3675,17 +4397,17 @@ end test 'line attached by continuation is appended as paragraph if term has inline description followed by detached paragraph' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 -+ -para + term1:: def1 + + + para -detached + detached EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3696,19 +4418,19 @@ end test 'line attached by continuation is appended as paragraph if term has inline description followed by detached block' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 -+ -para + term1:: def1 + + + para -**** -detached -**** + **** + detached + **** EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3719,16 +4441,16 @@ end test 'line attached by continuation offset by line comment is appended as paragraph if term has 
inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 -// comment -+ -para + term1:: def1 + // comment + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3737,16 +4459,16 @@ end test 'line attached by continuation offset by blank line is appended as paragraph if term has inline description' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 + term1:: def1 -+ -para + + + para EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 @@ -3755,47 +4477,47 @@ end test 'line comment offset by blank line divides lists because item has text' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 + term1:: def1 -// + // -term2:: def2 + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 end test 'ruler offset by blank line divides lists because item has text' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 + term1:: def1 -''' + ''' -term2:: def2 + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 end test 'block title offset by blank line divides lists and becomes title of second list because item has text' do - input = <<-EOS -== Lists + input = <<~'EOS' + == Lists -term1:: def1 + term1:: def1 -.title + .title -term2:: def2 + term2:: def2 EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 assert_xpath '(//*[@class="dlist"])[2]/*[@class="title"][text()="title"]', output, 1 end @@ -3803,24 +4525,36 @@ end context 'Callout lists' do + test 'does not recognize callout list denoted by markers that only have a trailing bracket' do + input = <<~'EOS' + ---- + require 'asciidoctor' # <1> + ---- + 1> Not a callout list item + EOS + + output = convert_string_to_embedded input + assert_css '.colist', output, 0 + end + test 'listing block with sequential callouts followed by adjacent callout list' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # <1> -doc = Asciidoctor::Document.new('Hello, World!') # <2> -puts doc.render # <3> ----- -<1> Describe the first line -<2> Describe the second line -<3> Describe the third line + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <3> + ---- + <1> Describe the first line + <2> Describe the second line + <3> Describe the third line EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 - assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 - assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 - assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 + assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 + assert_xpath 
'(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 + assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 @@ -3828,26 +4562,26 @@ end test 'listing block with sequential callouts followed by non-adjacent callout list' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # <1> -doc = Asciidoctor::Document.new('Hello, World!') # <2> -puts doc.render # <3> ----- - -Paragraph. - -<1> Describe the first line -<2> Describe the second line -<3> Describe the third line + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <3> + ---- + + Paragraph. + + <1> Describe the first line + <2> Describe the second line + <3> Describe the third line EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 - assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 - assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 - assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 + assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 + assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 + assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::*[1][self::simpara]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 @@ -3856,45 +4590,45 @@ end test 'listing block with a callout that refers to two different lines' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # <1> -doc = Asciidoctor::Document.new('Hello, World!') # <2> -puts doc.render # <2> ----- -<1> Import the library -<2> Where the magic happens + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <2> + ---- + <1> Import the library + <2> Where the magic happens EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 - assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 - assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 - assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 + assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 + assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 + assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 2 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2 CO1-3"]', output, 1 end - test 'listing block with non-sequential callouts followed by 
adjacent callout list' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # <2> -doc = Asciidoctor::Document.new('Hello, World!') # <3> -puts doc.render # <1> ----- -<1> Describe the first line -<2> Describe the second line -<3> Describe the third line + test 'source block with non-sequential callouts followed by adjacent callout list' do + input = <<~'EOS' + [source,ruby] + ---- + require 'asciidoctor' # <2> + doc = Asciidoctor::Document.new('Hello, World!') # <3> + puts doc.convert # <1> + ---- + <1> Describe the first line + <2> Describe the second line + <3> Describe the third line EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 - assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 - assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 - assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 + assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 + assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 + assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-3"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-1"]', output, 1 @@ -3902,61 +4636,61 @@ end test 'two listing blocks can share the same callout list' do - input = <<-EOS -.Import library -[source, ruby] ----- -require 'asciidoctor' # <1> ----- - -.Use library -[source, ruby] ----- -doc = Asciidoctor::Document.new('Hello, World!') # <2> -puts doc.render # <3> ----- - -<1> Describe the first line -<2> Describe the second line -<3> Describe the third line + input = <<~'EOS' + .Import library + [source, ruby] + ---- + require 'asciidoctor' # <1> + ---- + + .Use library + [source, ruby] + ---- + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <3> + ---- + + <1> Describe the first line + <2> Describe the second line + <3> Describe the third line EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 2 assert_xpath '(//programlisting)[1]//co', output, 1 - assert_xpath '(//programlisting)[1]//co[@id = "CO1-1"]', output, 1 + assert_xpath '(//programlisting)[1]//co[@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting)[2]//co', output, 2 - assert_xpath '((//programlisting)[2]//co)[1][@id = "CO1-2"]', output, 1 - assert_xpath '((//programlisting)[2]//co)[2][@id = "CO1-3"]', output, 1 + assert_xpath '((//programlisting)[2]//co)[1][@xml:id="CO1-2"]', output, 1 + assert_xpath '((//programlisting)[2]//co)[2][@xml:id="CO1-3"]', output, 1 assert_xpath '(//calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'two listing blocks each followed by an adjacent callout list' do - input = <<-EOS -.Import library -[source, ruby] ----- -require 'asciidoctor' # <1> ----- -<1> Describe the first line - -.Use library -[source, ruby] ----- -doc = Asciidoctor::Document.new('Hello, World!') # <1> -puts doc.render # <2> 
----- -<1> Describe the second line -<2> Describe the third line + input = <<~'EOS' + .Import library + [source, ruby] + ---- + require 'asciidoctor' # <1> + ---- + <1> Describe the first line + + .Use library + [source, ruby] + ---- + doc = Asciidoctor::Document.new('Hello, World!') # <1> + puts doc.convert # <2> + ---- + <1> Describe the second line + <2> Describe the third line EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 2 assert_xpath '(//programlisting)[1]//co', output, 1 - assert_xpath '(//programlisting)[1]//co[@id = "CO1-1"]', output, 1 + assert_xpath '(//programlisting)[1]//co[@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting)[2]//co', output, 2 - assert_xpath '((//programlisting)[2]//co)[1][@id = "CO2-1"]', output, 1 - assert_xpath '((//programlisting)[2]//co)[2][@id = "CO2-2"]', output, 1 + assert_xpath '((//programlisting)[2]//co)[1][@xml:id="CO2-1"]', output, 1 + assert_xpath '((//programlisting)[2]//co)[2][@xml:id="CO2-2"]', output, 1 assert_xpath '//calloutlist', output, 2 assert_xpath '(//calloutlist)[1]/callout', output, 1 assert_xpath '((//calloutlist)[1]/callout)[1][@arearefs = "CO1-1"]', output, 1 @@ -3965,24 +4699,49 @@ assert_xpath '((//calloutlist)[2]/callout)[2][@arearefs = "CO2-2"]', output, 1 end - test 'callout list with block content' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # <1> -doc = Asciidoctor::Document.new('Hello, World!') # <2> -puts doc.render # <3> ----- -<1> Imports the library -as a RubyGem -<2> Creates a new document -* Scans the lines for known blocks -* Converts the lines into blocks -<3> Renders the document -+ -You can write this to file rather than printing to stdout. + test 'callout list retains block content' do + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <3> + ---- + <1> Imports the library + as a RubyGem + <2> Creates a new document + * Scans the lines for known blocks + * Converts the lines into blocks + <3> Renders the document + + + You can write this to file rather than printing to stdout. EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string_to_embedded input + assert_xpath '//ol/li', output, 3 + assert_xpath %((//ol/li)[1]/p[text()="Imports the library\nas a RubyGem"]), output, 1 + assert_xpath %((//ol/li)[2]//ul), output, 1 + assert_xpath %((//ol/li)[2]//ul/li), output, 2 + assert_xpath %((//ol/li)[3]//p), output, 2 + end + + test 'callout list retains block content when converted to DocBook' do + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <3> + ---- + <1> Imports the library + as a RubyGem + <2> Creates a new document + * Scans the lines for known blocks + * Converts the lines into blocks + <3> Renders the document + + + You can write this to file rather than printing to stdout. 
+ EOS + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//calloutlist', output, 1 assert_xpath '//calloutlist/callout', output, 3 assert_xpath '(//calloutlist/callout)[1]/*', output, 1 @@ -3993,44 +4752,70 @@ end test 'escaped callout should not be interpreted as a callout' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # \\<1> ----- + input = <<~'EOS' + [source,text] + ---- + require 'asciidoctor' # \<1> + Asciidoctor.convert 'convert me!' \<2> + ---- EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} - assert_xpath '//co', output, 0 + [{}, { 'source-highlighter' => 'coderay' }].each do |attributes| + output = convert_string_to_embedded input, attributes: attributes + assert_css 'pre b', output, 0 + assert_includes output, ' # <1>' + assert_includes output, ' <2>' + end + end + + test 'should autonumber <.> callouts' do + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <.> + doc = Asciidoctor::Document.new('Hello, World!') # <.> + puts doc.convert # <.> + ---- + <.> Describe the first line + <.> Describe the second line + <.> Describe the third line + EOS + output = convert_string_to_embedded input + pre_html = (xmlnodes_at_css 'pre', output)[0].inner_html + assert_includes pre_html, '(1)' + assert_includes pre_html, '(2)' + assert_includes pre_html, '(3)' + assert_css '.colist ol', output, 1 + assert_css '.colist ol li', output, 3 end test 'should not recognize callouts in middle of line' do - input = <<-EOS -[source, ruby] ----- -puts "The syntax <1> at the end of the line makes a code callout" ----- + input = <<~'EOS' + [source, ruby] + ---- + puts "The syntax <1> at the end of the line makes a code callout" + ---- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//b', output, 0 end test 'should allow multiple callouts on the same line' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' <1> -doc = Asciidoctor.load('Hello, World!') # <2> <3> <4> -puts doc.render <5><6> -exit 0 ----- -<1> Require library -<2> Load document from String -<3> Uses default backend and doctype -<4> One more for good luck -<5> Renders document to String -<6> Prints output to stdout + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' <1> + doc = Asciidoctor.load('Hello, World!') # <2> <3> <4> + puts doc.convert <5><6> + exit 0 + ---- + <1> Require library + <2> Load document from String + <3> Uses default backend and doctype + <4> One more for good luck + <5> Renders document to String + <6> Prints output to stdout EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//code/b', output, 6 assert_match(/ \(1\)<\/b>$/, output) assert_match(/ \(2\)<\/b> \(3\)<\/b> \(4\)<\/b>$/, output) @@ -4038,91 +4823,159 @@ end test 'should allow XML comment-style callouts' do - input = <<-EOS -[source, xml] ----- -
    - Section Title - Just a paragraph -
    ----- -<1> The title is required -<2> The content isn't + input = <<~'EOS' + [source, xml] + ---- +
    + Section Title + Just a paragraph +
    + ---- + <1> The title is required + <2> The content isn't EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//b', output, 2 assert_xpath '//b[text()="(1)"]', output, 1 assert_xpath '//b[text()="(2)"]', output, 1 end test 'should not allow callouts with half an XML comment' do - input = <<-EOS ----- -First line <1--> -Second line <2--> ----- + input = <<~'EOS' + ---- + First line <1--> + Second line <2--> + ---- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//b', output, 0 end - test 'should not recognize callouts in an indented labeled list paragraph' do - input = <<-EOS -foo:: - bar <1> - -<1> Not pointing to a callout - EOS - output = render_embedded_string input - assert_xpath '//dl//b', output, 0 - assert_xpath '//dl/dd/p[text()="bar <1>"]', output, 1 - assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 + test 'should not recognize callouts in an indented description list paragraph' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + foo:: + bar <1> + + <1> Not pointing to a callout + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//dl//b', output, 0 + assert_xpath '//dl/dd/p[text()="bar <1>"]', output, 1 + assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 + assert_message logger, :WARN, ': line 4: no callout found for <1>', Hash + end end test 'should not recognize callouts in an indented outline list paragraph' do - input = <<-EOS -* foo - bar <1> - -<1> Not pointing to a callout - EOS - output = render_embedded_string input - assert_xpath '//ul//b', output, 0 - assert_xpath %(//ul/li/p[text()="foo\nbar <1>"]), output, 1 - assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 - end - - test 'should remove line comment chars that precedes callout number' do - input = <<-EOS -[source,ruby] ----- -puts 'Hello, world!' # <1> ----- -<1> Ruby - -[source,groovy] ----- -println 'Hello, world!' // <1> ----- -<1> Groovy - -[source,clojure] ----- -(def hello (fn [] "Hello, world!")) ;; <1> -(hello) ----- -<1> Clojure - -[source,haskell] ----- -main = putStrLn "Hello, World!" -- <1> ----- -<1> Haskell + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * foo + bar <1> + + <1> Not pointing to a callout EOS - [{}, {'source-highlighter' => 'coderay'}].each do |attributes| - output = render_embedded_string input, :attributes => attributes + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//ul//b', output, 0 + assert_xpath %(//ul/li/p[text()="foo\nbar <1>"]), output, 1 + assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 + assert_message logger, :WARN, ': line 4: no callout found for <1>', Hash + end + end + + test 'should warn if numbers in callout list are out of sequence' do + input = <<~'EOS' + ---- + <1> + + + ---- + <1> Container of beans. + Beans are fun. + <3> An actual bean. 
+ EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//ol/li', output, 2 + assert_messages logger, [ + [:WARN, ': line 8: callout list item index: expected 2, got 3', Hash], + [:WARN, ': line 8: no callout found for <2>', Hash] + ] + end + end + + test 'should preserve line comment chars that precede callout number if icons is not set' do + input = <<~'EOS' + [source,ruby] + ---- + puts 'Hello, world!' # <1> + ---- + <1> Ruby + + [source,groovy] + ---- + println 'Hello, world!' // <1> + ---- + <1> Groovy + + [source,clojure] + ---- + (def hello (fn [] "Hello, world!")) ;; <1> + (hello) + ---- + <1> Clojure + + [source,haskell] + ---- + main = putStrLn "Hello, World!" -- <1> + ---- + <1> Haskell + EOS + [{}, { 'source-highlighter' => 'coderay' }].each do |attributes| + output = convert_string_to_embedded input, attributes: attributes assert_xpath '//b', output, 4 nodes = xmlnodes_at_css 'pre', output + assert_equal %(puts 'Hello, world!' # (1)), nodes[0].text + assert_equal %(println 'Hello, world!' // (1)), nodes[1].text + assert_equal %((def hello (fn [] "Hello, world!")) ;; (1)\n(hello)), nodes[2].text + assert_equal %(main = putStrLn "Hello, World!" -- (1)), nodes[3].text + end + end + + test 'should remove line comment chars that precede callout number if icons is font' do + input = <<~'EOS' + [source,ruby] + ---- + puts 'Hello, world!' # <1> + ---- + <1> Ruby + + [source,groovy] + ---- + println 'Hello, world!' // <1> + ---- + <1> Groovy + + [source,clojure] + ---- + (def hello (fn [] "Hello, world!")) ;; <1> + (hello) + ---- + <1> Clojure + + [source,haskell] + ---- + main = putStrLn "Hello, World!" -- <1> + ---- + <1> Haskell + EOS + [{}, { 'source-highlighter' => 'coderay' }].each do |attributes| + output = convert_string_to_embedded input, attributes: attributes.merge({ 'icons' => 'font' }) + assert_css 'pre b', output, 4 + assert_css 'pre i.conum', output, 4 + nodes = xmlnodes_at_css 'pre', output assert_equal %(puts 'Hello, world!' (1)), nodes[0].text assert_equal %(println 'Hello, world!' (1)), nodes[1].text assert_equal %((def hello (fn [] "Hello, world!")) (1)\n(hello)), nodes[2].text @@ -4131,53 +4984,83 @@ end test 'should allow line comment chars that precede callout number to be specified' do - input = <<-EOS -[source,erlang,line-comment=%] ----- -hello_world() -> io:fwrite("hello, world\n"). % <1> ----- -<1> Erlang + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [source,erlang,line-comment=%] + ---- + hello_world() -> % <1> + io:fwrite("hello, world~n"). %<2> + ---- + <1> Erlang function clause head. + <2> ~n adds a new line to the output. EOS - output = render_embedded_string input + output = convert_string_to_embedded input + assert_xpath '//b', output, 2 + nodes = xmlnodes_at_css 'pre', output + assert_equal %(hello_world() -> % (1)\n io:fwrite("hello, world~n"). %(2)), nodes[0].text + end + + test 'should allow line comment chars preceding callout number to be configurable when source-highlighter is coderay' do + input = <<~'EOS' + [source,html,line-comment=-#] + ---- + -# <1> + %p Hello + ---- + <1> Prints a paragraph with the text "Hello" + EOS + output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'coderay' } assert_xpath '//b', output, 1 nodes = xmlnodes_at_css 'pre', output - assert_equal %(hello_world() -> io:fwrite("hello, world\n"). 
(1)), nodes[0].text + assert_equal %(-# (1)\n%p Hello), nodes[0].text + end + + test 'should not eat whitespace before callout number if line-comment attribute is empty' do + input = <<~'EOS' + [source,asciidoc,line-comment=] + ---- + -- <1> + ---- + <1> The start of an open block. + EOS + output = convert_string_to_embedded input, attributes: { 'icons' => 'font' } + assert_includes output, '-- -Violets are blue <2> -.... + input = <<~'EOS' + .... + Roses are red <1> + Violets are blue <2> + .... -<1> And so is Ruby -<2> But violet is more like purple + <1> And so is Ruby + <2> But violet is more like purple EOS - output = render_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//literallayout', output, 1 assert_xpath '//literallayout//co', output, 2 - assert_xpath '(//literallayout//co)[1][@id = "CO1-1"]', output, 1 - assert_xpath '(//literallayout//co)[2][@id = "CO1-2"]', output, 1 + assert_xpath '(//literallayout//co)[1][@xml:id="CO1-1"]', output, 1 + assert_xpath '(//literallayout//co)[2][@xml:id="CO1-2"]', output, 1 assert_xpath '//literallayout/following-sibling::*[1][self::calloutlist]/callout', output, 2 assert_xpath '(//literallayout/following-sibling::*[1][self::calloutlist]/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//literallayout/following-sibling::*[1][self::calloutlist]/callout)[2][@arearefs = "CO1-2"]', output, 1 end test 'callout list with icons enabled' do - input = <<-EOS -[source, ruby] ----- -require 'asciidoctor' # <1> -doc = Asciidoctor::Document.new('Hello, World!') # <2> -puts doc.render # <3> ----- -<1> Describe the first line -<2> Describe the second line -<3> Describe the third line + input = <<~'EOS' + [source, ruby] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') # <2> + puts doc.convert # <3> + ---- + <1> Describe the first line + <2> Describe the second line + <3> Describe the third line EOS - output = render_embedded_string input, :attributes => {'icons' => ''} + output = convert_string_to_embedded input, attributes: { 'icons' => '' } assert_css '.listingblock code > img', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="listingblock"]//code/img)[#{i}][@src="./images/icons/callouts/#{i}.png"][@alt="#{i}"]), output, 1 @@ -4189,18 +5072,18 @@ end test 'callout list with font-based icons enabled' do - input = <<-EOS -[source] ----- -require 'asciidoctor' # <1> -doc = Asciidoctor::Document.new('Hello, World!') #<2> -puts doc.render #<3> ----- -<1> Describe the first line -<2> Describe the second line -<3> Describe the third line + input = <<~'EOS' + [source] + ---- + require 'asciidoctor' # <1> + doc = Asciidoctor::Document.new('Hello, World!') #<2> + puts doc.convert #<3> + ---- + <1> Describe the first line + <2> Describe the second line + <3> Describe the third line EOS - output = render_embedded_string input, :attributes => {'icons' => 'font'} + output = convert_string_to_embedded input, attributes: { 'icons' => 'font' } assert_css '.listingblock code > i', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}]), output, 1 @@ -4214,35 +5097,78 @@ assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}]/following-sibling::b[text() = "#{i}"]), output, 1 end end + + test 'should match trailing line separator in text of list item' do + input = <<~EOS.chop + ---- + A <1> + B <2> + C <3> + ---- + <1> a + <2> b#{decode_char 8232} + <3> c + EOS + + output = convert_string 
input + assert_css 'li', output, 3 + assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}"]), output, 1 + end + + test 'should match line separator in text of list item' do + input = <<~EOS.chop + ---- + A <1> + B <2> + C <3> + ---- + <1> a + <2> b#{decode_char 8232}b + <3> c + EOS + + output = convert_string input + assert_css 'li', output, 3 + assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 + end end context 'Checklists' do test 'should create checklist if at least one item has checkbox syntax' do - input = <<-EOS -- [ ] todo -- [x] done -- [ ] another todo -- [*] another done -- plain + input = <<~'EOS' + - [ ] todo + - [x] done + - [ ] another todo + - [*] another done + - plain EOS - output = render_embedded_string input + doc = document_from_string input + checklist = doc.blocks[0] + assert checklist.option?('checklist') + assert checklist.items[0].attr?('checkbox') + refute checklist.items[0].attr?('checked') + assert checklist.items[1].attr?('checkbox') + assert checklist.items[1].attr?('checked') + refute checklist.items[4].attr?('checkbox') + + output = doc.convert standalone: false assert_css '.ulist.checklist', output, 1 - assert_xpath %((/*[@class="ulist checklist"]/ul/li)[1]/p[text()="#{expand_entity 10063} todo"]), output, 1 - assert_xpath %((/*[@class="ulist checklist"]/ul/li)[2]/p[text()="#{expand_entity 10003} done"]), output, 1 - assert_xpath %((/*[@class="ulist checklist"]/ul/li)[3]/p[text()="#{expand_entity 10063} another todo"]), output, 1 - assert_xpath %((/*[@class="ulist checklist"]/ul/li)[4]/p[text()="#{expand_entity 10003} another done"]), output, 1 + assert_xpath %((/*[@class="ulist checklist"]/ul/li)[1]/p[text()="#{decode_char 10063} todo"]), output, 1 + assert_xpath %((/*[@class="ulist checklist"]/ul/li)[2]/p[text()="#{decode_char 10003} done"]), output, 1 + assert_xpath %((/*[@class="ulist checklist"]/ul/li)[3]/p[text()="#{decode_char 10063} another todo"]), output, 1 + assert_xpath %((/*[@class="ulist checklist"]/ul/li)[4]/p[text()="#{decode_char 10003} another done"]), output, 1 assert_xpath '(/*[@class="ulist checklist"]/ul/li)[5]/p[text()="plain"]', output, 1 end test 'should create checklist with font icons if at least one item has checkbox syntax and icons attribute is font' do - input = <<-EOS -- [ ] todo -- [x] done -- plain + input = <<~'EOS' + - [ ] todo + - [x] done + - plain EOS - output = render_embedded_string input, :attributes => {'icons' => 'font'} + output = convert_string_to_embedded input, attributes: { 'icons' => 'font' } assert_css '.ulist.checklist', output, 1 assert_css '.ulist.checklist li i.fa-check-square-o', output, 1 assert_css '.ulist.checklist li i.fa-square-o', output, 1 @@ -4250,15 +5176,20 @@ end test 'should create interactive checklist if interactive option is set even with icons attribute is font' do - input = <<-EOS -:icons: font + input = <<~'EOS' + :icons: font -[options="interactive"] -- [ ] todo -- [x] done + [%interactive] + - [ ] todo + - [x] done EOS - output = render_embedded_string input + doc = document_from_string input + checklist = doc.blocks[0] + assert checklist.option?('checklist') + assert checklist.option?('interactive') + + output = doc.convert standalone: false assert_css '.ulist.checklist', output, 1 assert_css '.ulist.checklist li input[type="checkbox"]', output, 2 assert_css '.ulist.checklist li input[type="checkbox"][disabled]', output, 0 @@ -4268,27 +5199,27 @@ context 'Lists model' do test 'content should return items in list' do - input = <<-EOS -* one -* two -* three + 
input = <<~'EOS' + * one + * two + * three EOS doc = document_from_string input list = doc.blocks.first - assert list.is_a? Asciidoctor::List + assert_kind_of Asciidoctor::List, list items = list.items assert_equal 3, items.size assert_equal list.items, list.content end test 'list item should be the parent of block attached to a list item' do - input = <<-EOS -* list item 1 -+ ----- -listing block in list item 1 ----- + input = <<~'EOS' + * list item 1 + + + ---- + listing block in list item 1 + ---- EOS doc = document_from_string input @@ -4300,10 +5231,10 @@ end test 'outline? should return true for unordered list' do - input = <<-EOS -* one -* two -* three + input = <<~'EOS' + * one + * two + * three EOS doc = document_from_string input @@ -4312,10 +5243,10 @@ end test 'outline? should return true for ordered list' do - input = <<-EOS -. one -. two -. three + input = <<~'EOS' + . one + . two + . three EOS doc = document_from_string input @@ -4324,57 +5255,143 @@ end test 'outline? should return false for description list' do - input = <<-EOS -label:: desc - EOS - + input = 'label:: desc' doc = document_from_string input list = doc.blocks.first - assert !list.outline? + refute list.outline? end test 'simple? should return true for list item with no nested blocks' do - input = <<-EOS -* one -* two -* three + input = <<~'EOS' + * one + * two + * three EOS doc = document_from_string input list = doc.blocks.first assert list.items.first.simple? - assert !list.items.first.compound? + refute list.items.first.compound? end test 'simple? should return true for list item with nested outline list' do - input = <<-EOS -* one - ** more about one - ** and more -* two -* three + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + * one + ** more about one + ** and more + * two + * three EOS doc = document_from_string input list = doc.blocks.first assert list.items.first.simple? - assert !list.items.first.compound? + refute list.items.first.compound? end test 'simple? should return false for list item with block content' do - input = <<-EOS -* one -+ ----- -listing block in list item 1 ----- -* two -* three + input = <<~'EOS' + * one + + + ---- + listing block in list item 1 + ---- + * two + * three EOS doc = document_from_string input list = doc.blocks.first - assert !list.items.first.simple? + refute list.items.first.simple? assert list.items.first.compound? end + + test 'should allow text of ListItem to be assigned' do + input = <<~'EOS' + * one + * two + * three + EOS + + doc = document_from_string input + list = (doc.find_by context: :ulist).first + assert_equal 3, list.items.size + assert_equal 'one', list.items[0].text + list.items[0].text = 'un' + assert_equal 'un', list.items[0].text + end + + test 'id and role assigned to ulist item in model are transmitted to output' do + input = <<~'EOS' + * one + * two + * three + EOS + + doc = document_from_string input + item_0 = doc.blocks[0].items[0] + item_0.id = 'one' + item_0.add_role 'item' + output = doc.convert + assert_css 'li#one.item', output, 1 + end + + test 'id and role assigned to olist item in model are transmitted to output' do + input = <<~'EOS' + . one + . two + . 
three + EOS + + doc = document_from_string input + item_0 = doc.blocks[0].items[0] + item_0.id = 'one' + item_0.add_role 'item' + output = doc.convert + assert_css 'li#one.item', output, 1 + end + + test 'should allow API control over substitutions applied to ListItem text' do + input = <<~'EOS' + * *one* + * _two_ + * `three` + * #four# + EOS + + doc = document_from_string input + list = (doc.find_by context: :ulist).first + assert_equal 4, list.items.size + list.items[0].remove_sub :quotes + assert_equal '*one*', list.items[0].text + refute_includes list.items[0].subs, :quotes + list.items[1].subs.clear + assert_empty list.items[1].subs + assert_equal '_two_', list.items[1].text + list.items[2].subs.replace [:specialcharacters] + assert_equal [:specialcharacters], list.items[2].subs + assert_equal '`three`', list.items[2].text + assert_equal 'four', list.items[3].text + end + + test 'should set lineno to line number in source where list starts' do + input = <<~'EOS' + * bullet 1 + ** bullet 1.1 + *** bullet 1.1.1 + * bullet 2 + EOS + doc = document_from_string input, sourcemap: true + lists = doc.find_by context: :ulist + assert_equal 1, lists[0].lineno + assert_equal 2, lists[1].lineno + assert_equal 3, lists[2].lineno + + list_items = doc.find_by context: :list_item + assert_equal 1, list_items[0].lineno + assert_equal 2, list_items[1].lineno + assert_equal 3, list_items[2].lineno + assert_equal 4, list_items[3].lineno + end end diff -Nru asciidoctor-1.5.5/test/logger_test.rb asciidoctor-2.0.10/test/logger_test.rb --- asciidoctor-1.5.5/test/logger_test.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/logger_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,214 @@ +# frozen_string_literal: true +require_relative 'test_helper' + +context 'Logger' do + MyLogger = Class.new Logger + + context 'LoggerManager' do + test 'provides access to logger via static logger method' do + logger = Asciidoctor::LoggerManager.logger + refute_nil logger + assert_kind_of Logger, logger + end + + test 'allows logger instance to be changed' do + old_logger = Asciidoctor::LoggerManager.logger + new_logger = MyLogger.new $stdout + begin + Asciidoctor::LoggerManager.logger = new_logger + assert_same new_logger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger = old_logger + end + end + + test 'setting logger instance to falsy value resets instance to default logger' do + old_logger = Asciidoctor::LoggerManager.logger + begin + Asciidoctor::LoggerManager.logger = MyLogger.new $stdout + Asciidoctor::LoggerManager.logger = nil + refute_nil Asciidoctor::LoggerManager.logger + assert_kind_of Logger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger = old_logger + end + end + + test 'creates logger instance from static logger_class property' do + old_logger_class = Asciidoctor::LoggerManager.logger_class + old_logger = Asciidoctor::LoggerManager.logger + begin + Asciidoctor::LoggerManager.logger_class = MyLogger + Asciidoctor::LoggerManager.logger = nil + refute_nil Asciidoctor::LoggerManager.logger + assert_kind_of MyLogger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger_class = old_logger_class + Asciidoctor::LoggerManager.logger = old_logger + end + end + end + + context 'Logger' do + test 'configures default logger with progname set to asciidoctor' do + assert_equal 'asciidoctor', Asciidoctor::LoggerManager.logger.progname + end + + test 'configures default logger with level set to WARN' do + 
assert_equal Logger::Severity::WARN, Asciidoctor::LoggerManager.logger.level + end + + test 'configures default logger to write messages to $stderr' do + out_string, err_string = redirect_streams do |out, err| + Asciidoctor::LoggerManager.logger.warn 'this is a call' + [out.string, err.string] + end + assert_empty out_string + refute_empty err_string + assert_includes err_string, 'this is a call' + end + + test 'configures default logger to use a formatter that matches traditional format' do + err_string = redirect_streams do |_, err| + Asciidoctor::LoggerManager.logger.warn 'this is a call' + Asciidoctor::LoggerManager.logger.fatal 'it cannot be done' + err.string + end + assert_includes err_string, %(asciidoctor: WARNING: this is a call) + assert_includes err_string, %(asciidoctor: FAILED: it cannot be done) + end + + test 'NullLogger level is not nil' do + logger = Asciidoctor::NullLogger.new + refute_nil logger.level + assert_equal Logger::WARN, logger.level + end + end + + context ':logger API option' do + test 'should be able to set logger when invoking load API' do + old_logger = Asciidoctor::LoggerManager.logger + new_logger = MyLogger.new $stdout + begin + Asciidoctor.load 'contents', logger: new_logger + assert_same new_logger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger = old_logger + end + end + + test 'should be able to set logger when invoking load_file API' do + old_logger = Asciidoctor::LoggerManager.logger + new_logger = MyLogger.new $stdout + begin + Asciidoctor.load_file fixture_path('basic.adoc'), logger: new_logger + assert_same new_logger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger = old_logger + end + end + + test 'should be able to set logger when invoking convert API' do + old_logger = Asciidoctor::LoggerManager.logger + new_logger = MyLogger.new $stdout + begin + Asciidoctor.convert 'contents', logger: new_logger + assert_same new_logger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger = old_logger + end + end + + test 'should be able to set logger when invoking convert_file API' do + old_logger = Asciidoctor::LoggerManager.logger + new_logger = MyLogger.new $stdout + begin + Asciidoctor.convert_file fixture_path('basic.adoc'), to_file: false, logger: new_logger + assert_same new_logger, Asciidoctor::LoggerManager.logger + ensure + Asciidoctor::LoggerManager.logger = old_logger + end + end + end + + context 'Logging' do + test 'including Logging gives instance methods on module access to logging infrastructure' do + module SampleModuleA + include Asciidoctor::Logging + def get_logger + logger + end + end + + class SampleClassA + include SampleModuleA + end + assert_same Asciidoctor::LoggerManager.logger, SampleClassA.new.get_logger + assert SampleClassA.public_method_defined? :logger + end + + test 'including Logging gives static methods on module access to logging infrastructure' do + module SampleModuleB + include Asciidoctor::Logging + def self.get_logger + logger + end + end + + assert_same Asciidoctor::LoggerManager.logger, SampleModuleB.get_logger + end + + test 'including Logging gives instance methods on class access to logging infrastructure' do + class SampleClassC + include Asciidoctor::Logging + def get_logger + logger + end + end + + assert_same Asciidoctor::LoggerManager.logger, SampleClassC.new.get_logger + assert SampleClassC.public_method_defined? 
:logger + end + + test 'including Logging gives static methods on class access to logging infrastructure' do + class SampleClassD + include Asciidoctor::Logging + def self.get_logger + logger + end + end + + assert_same Asciidoctor::LoggerManager.logger, SampleClassD.get_logger + end + + test 'can create an auto-formatting message with context' do + class SampleClassE + include Asciidoctor::Logging + def create_message cursor + message_with_context 'Asciidoctor was here', source_location: cursor + end + end + + cursor = Asciidoctor::Reader::Cursor.new 'file.adoc', fixturedir, 'file.adoc', 5 + message = SampleClassE.new.create_message cursor + assert_equal 'Asciidoctor was here', message[:text] + assert_same cursor, message[:source_location] + assert_equal 'file.adoc: line 5: Asciidoctor was here', message.inspect + end + + test 'writes message prefixed with program name and source location to stderr' do + input = <<~'EOS' + [#first] + first paragraph + + [#first] + another first paragraph + EOS + messages = redirect_streams do |_, err| + convert_string_to_embedded input + err.string.chomp + end + assert_equal 'asciidoctor: WARNING: : line 5: id assigned to block already in use: first', messages + end + end +end diff -Nru asciidoctor-1.5.5/test/manpage_test.rb asciidoctor-2.0.10/test/manpage_test.rb --- asciidoctor-1.5.5/test/manpage_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/manpage_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,203 +1,733 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' -SAMPLE_MANPAGE_HEADER = <<-EOS.chomp -= command (1) -Author Name -:doctype: manpage -:man manual: Command Manual -:man source: Command 1.2.3 +context 'Manpage' do + SAMPLE_MANPAGE_HEADER = <<~'EOS'.chop + = command (1) + Author Name + :doctype: manpage + :man manual: Command Manual + :man source: Command 1.2.3 -== NAME + == NAME -command - does stuff + command - does stuff -== SYNOPSIS + == SYNOPSIS -*command* [_OPTION_]... _FILE_... + *command* [_OPTION_]... _FILE_... -== DESCRIPTION -EOS + == DESCRIPTION + EOS -context 'Manpage' do context 'Configuration' do + test 'should set proper manpage-related attributes' do + input = SAMPLE_MANPAGE_HEADER + doc = Asciidoctor.load input, backend: :manpage + assert_equal 'man', doc.attributes['filetype'] + assert_equal '', doc.attributes['filetype-man'] + assert_equal '1', doc.attributes['manvolnum'] + assert_equal '.1', doc.attributes['outfilesuffix'] + assert_equal 'command', doc.attributes['manname'] + assert_equal 'command', doc.attributes['mantitle'] + assert_equal 'does stuff', doc.attributes['manpurpose'] + assert_equal 'command', doc.attributes['docname'] + end + + test 'should output multiple mannames in NAME section' do + input = SAMPLE_MANPAGE_HEADER.sub(/^command - /, 'command, alt_command - ') + output = Asciidoctor.convert input, backend: :manpage, standalone: true + assert_includes output.lines, %(command, alt_command \\- does stuff\n) + end + + test 'should not parse NAME section if manname and manpurpose attributes are set' do + input = <<~'EOS' + = foobar (1) + Author Name + :doctype: manpage + :man manual: Foo Bar Manual + :man source: Foo Bar 1.0 + + == SYNOPSIS + + *foobar* [_OPTIONS_]... + + == DESCRIPTION + + When you need to put some foo on the bar. 
+ EOS + + attrs = { 'manname' => 'foobar', 'manpurpose' => 'puts some foo on the bar' } + doc = Asciidoctor.load input, backend: :manpage, standalone: true, attributes: attrs + assert_equal 'foobar', (doc.attr 'manname') + assert_equal ['foobar'], (doc.attr 'mannames') + assert_equal 'puts some foo on the bar', (doc.attr 'manpurpose') + assert_equal 'SYNOPSIS', doc.sections[0].title + end + + test 'should normalize whitespace and skip line comments before and inside NAME section' do + input = <<~'EOS' + = foobar (1) + Author Name + :doctype: manpage + :man manual: Foo Bar Manual + :man source: Foo Bar 1.0 + + // this is the name section + == NAME + + // it follows the form `name - description` + foobar - puts some foo + on the bar + // a little bit of this, a little bit of that + + == SYNOPSIS + + *foobar* [_OPTIONS_]... + + == DESCRIPTION + + When you need to put some foo on the bar. + EOS + + doc = Asciidoctor.load input, backend: :manpage, standalone: true + assert_equal 'puts some foo on the bar', (doc.attr 'manpurpose') + end + + test 'should parse malformed document with warnings' do + input = 'garbage in' + using_memory_logger do |logger| + doc = Asciidoctor.load input, backend: :manpage, standalone: true, attributes: { 'docname' => 'cmd' } + assert_equal 'cmd', doc.attr('manname') + assert_equal ['cmd'], doc.attr('mannames') + assert_equal '.1', doc.attr('outfilesuffix') + output = doc.convert + refute logger.messages.empty? + assert_includes output, 'Title: cmd' + assert output.end_with?('garbage in') + end + end + + test 'should warn if document title is non-conforming' do + input = <<~'EOS' + = command + + == Name + + command - does stuff + EOS + + using_memory_logger do |logger| + document_from_string input, backend: :manpage + assert_message logger, :ERROR, ': line 1: non-conforming manpage title', Hash + end + end + + test 'should warn if first section is not name section' do + input = <<~'EOS' + = command(1) + + == Synopsis + + Does stuff. + EOS + + using_memory_logger do |logger| + doc = document_from_string input, backend: :manpage + assert_message logger, :ERROR, ': line 3: non-conforming name section body', Hash + refute_nil doc.sections[0] + assert_equal 'Synopsis', doc.sections[0].title + end + end + test 'should define default linkstyle' do input = SAMPLE_MANPAGE_HEADER - output = Asciidoctor.convert input, :backend => :manpage, :header_footer => true - assert_match(/^\.LINKSTYLE blue R < >$/, output) + output = Asciidoctor.convert input, backend: :manpage, standalone: true + assert_includes output.lines, %(. LINKSTYLE blue R < >\n) end test 'should use linkstyle defined by man-linkstyle attribute' do input = SAMPLE_MANPAGE_HEADER - output = Asciidoctor.convert input, :backend => :manpage, :header_footer => true, - :attributes => { 'man-linkstyle' => 'cyan B \[fo] \[fc]' } - assert_match(/^\.LINKSTYLE cyan B \\\[fo\] \\\[fc\]$/, output) + output = Asciidoctor.convert input, backend: :manpage, standalone: true, attributes: { 'man-linkstyle' => 'cyan B \[fo] \[fc]' } + assert_includes output.lines, %(. LINKSTYLE cyan B \\[fo] \\[fc]\n) + end + + test 'should require specialchars in value of man-linkstyle attribute defined in document to be escaped' do + input = <<~EOS.chop + :man-linkstyle: cyan R < > + #{SAMPLE_MANPAGE_HEADER} + EOS + output = Asciidoctor.convert input, backend: :manpage, standalone: true + assert_includes output.lines, %(. 
LINKSTYLE cyan R < >\n) + + input = <<~EOS.chop + :man-linkstyle: pass:[cyan R < >] + #{SAMPLE_MANPAGE_HEADER} + EOS + output = Asciidoctor.convert input, backend: :manpage, standalone: true + assert_includes output.lines, %(. LINKSTYLE cyan R < >\n) end end context 'Manify' do + test 'should unescape literal ampersand' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + (C) & (R) are translated to character references, but not the &. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal '\\(co & \\(rg are translated to character references, but not the &.', output.lines.last.chomp + end + + test 'should replace em dashes' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + go -- to + + go--to + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_includes output, 'go \\(em to' + assert_includes output, 'go\\(emto' + end + test 'should escape lone period' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -.) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '\&.', output.lines.entries.last.chomp + . + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal '\&.', output.lines.last.chomp end test 'should escape raw macro' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + AAA this line of text should be show + .if 1 .nx + BBB this line and the one above it should be visible + EOS + + output = Asciidoctor.convert input, backend: :manpage + assert_equal '\&.if 1 .nx', output.lines[-2].chomp + end + + test 'should normalize whitespace in a paragraph' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + Oh, here it goes again + I should have known, + should have known, + should have known again + EOS + + output = Asciidoctor.convert input, backend: :manpage + assert_includes output, %(Oh, here it goes again\nI should have known,\nshould have known,\nshould have known again) + end -AAA this line of text should be show -.if 1 .nx -BBB this line and the one above it should be visible) + test 'should normalize whitespace in a list item' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '\&.if 1 .nx', output.lines.entries[-2].chomp + * Oh, here it goes again + I should have known, + should have known, + should have known again + EOS + + output = Asciidoctor.convert input, backend: :manpage + assert_includes output, %(Oh, here it goes again\nI should have known,\nshould have known,\nshould have known again) + end + + test 'should collapse whitespace in the man manual and man source' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + Describe this thing. 
+ EOS + + output = Asciidoctor.convert input, backend: :manpage, standalone: true, attributes: { + 'manmanual' => %(General\nCommands\nManual), + 'mansource' => %(Control\nAll\nThe\nThings\n5.0), + } + assert_includes output, 'Manual: General Commands Manual' + assert_includes output, 'Source: Control All The Things 5.0' + assert_includes output, '"Control All The Things 5.0" "General Commands Manual"' end end context 'Backslash' do test 'should not escape spaces for empty manual or source fields' do input = SAMPLE_MANPAGE_HEADER.lines.select {|l| !l.start_with?(':man ') } - output = Asciidoctor.convert input, :backend => :manpage, :header_footer => true + output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_match ' Manual: \ \&', output assert_match ' Source: \ \&', output assert_match(/^\.TH "COMMAND" .* "\\ \\&" "\\ \\&"$/, output) end test 'should preserve backslashes in escape sequences' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -"`hello`" '`goodbye`' *strong* _weak_ `even`) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '\(lqhello\(rq \(oqgoodbye\(cq \fBstrong\fP \fIweak\fP \f[CR]even\fP', output.lines.entries.last.chomp + "`hello`" '`goodbye`' *strong* _weak_ `even` + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal '\(lqhello\(rq \(oqgoodbye\(cq \fBstrong\fP \fIweak\fP \f(CReven\fP', output.lines.last.chomp end test 'should escape backslashes in content' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -\\.foo \\ bar\\ -baz) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '\(rs.foo \(rs bar\(rs', output.lines.entries[-2].chomp + \\.foo \\ bar\\ + baz + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal '\(rs.foo \(rs bar\(rs', output.lines[-2].chomp end test 'should escape literal escape sequence' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} - \\fB makes text bold) - output = Asciidoctor.convert input, :backend => :manpage + \\fB makes text bold + EOS + output = Asciidoctor.convert input, backend: :manpage assert_match '\(rsfB makes text bold', output end + + test 'should preserve inline breaks' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + Before break. + + After break. + EOS + expected = <<~'EOS'.chop + Before break. + .br + After break. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-3..-1].join + end end context 'URL macro' do test 'should not leave blank line before URL macro' do - input = %(#{SAMPLE_MANPAGE_HEADER} -First paragraph. - -http://asciidoc.org[AsciiDoc]) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '.sp -First paragraph. -.sp -.URL "http://asciidoc.org" "AsciiDoc" ""', output.lines.entries[-4..-1].join + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + First paragraph. + + http://asciidoc.org[AsciiDoc] + EOS + expected = <<~'EOS'.chop + .sp + First paragraph. + .sp + .URL "http://asciidoc.org" "AsciiDoc" "" + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-4..-1].join end test 'should not swallow content following URL' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -http://asciidoc.org[AsciiDoc] can be used to create man pages.) 
- output = Asciidoctor.convert input, :backend => :manpage - assert_equal '.URL "http://asciidoc.org" "AsciiDoc" " " -can be used to create man pages.', output.lines.entries[-2..-1].join + http://asciidoc.org[AsciiDoc] can be used to create man pages. + EOS + expected = <<~'EOS'.chop + .URL "http://asciidoc.org" "AsciiDoc" " " + can be used to create man pages. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-2..-1].join end test 'should pass adjacent character as final argument of URL macro' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -This is http://asciidoc.org[AsciiDoc].) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal 'This is \c -.URL "http://asciidoc.org" "AsciiDoc" "."', output.lines.entries[-2..-1].join + This is http://asciidoc.org[AsciiDoc]. + EOS + expected = <<~'EOS'.chop + This is \c + .URL "http://asciidoc.org" "AsciiDoc" "." + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-2..-1].join end test 'should pass adjacent character as final argument of URL macro and move trailing content to next line' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -This is http://asciidoc.org[AsciiDoc], which can be used to write content.) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal 'This is \c -.URL "http://asciidoc.org" "AsciiDoc" "," -which can be used to write content.', output.lines.entries[-3..-1].join + This is http://asciidoc.org[AsciiDoc], which can be used to write content. + EOS + expected = <<~'EOS'.chop + This is \c + .URL "http://asciidoc.org" "AsciiDoc" "," + which can be used to write content. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-3..-1].join end test 'should not leave blank lines between URLs on contiguous lines of input' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -The corresponding implementations are -http://clisp.sf.net[CLISP], -http://ccl.clozure.com[Clozure CL], -http://cmucl.org[CMUCL], -http://ecls.sf.net[ECL], -and http://sbcl.sf.net[SBCL].) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '.sp -The corresponding implementations are -.URL "http://clisp.sf.net" "CLISP" "," -.URL "http://ccl.clozure.com" "Clozure CL" "," -.URL "http://cmucl.org" "CMUCL" "," -.URL "http://ecls.sf.net" "ECL" "," -and \c -.URL "http://sbcl.sf.net" "SBCL" "."', output.lines.entries[-8..-1].join + The corresponding implementations are + http://clisp.sf.net[CLISP], + http://ccl.clozure.com[Clozure CL], + http://cmucl.org[CMUCL], + http://ecls.sf.net[ECL], + and http://sbcl.sf.net[SBCL]. + EOS + expected = <<~'EOS'.chop + .sp + The corresponding implementations are + .URL "http://clisp.sf.net" "CLISP" "," + .URL "http://ccl.clozure.com" "Clozure CL" "," + .URL "http://cmucl.org" "CMUCL" "," + .URL "http://ecls.sf.net" "ECL" "," + and \c + .URL "http://sbcl.sf.net" "SBCL" "." 
+ EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-8..-1].join end test 'should not leave blank lines between URLs on same line of input' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -The corresponding implementations are http://clisp.sf.net[CLISP], http://ccl.clozure.com[Clozure CL], http://cmucl.org[CMUCL], http://ecls.sf.net[ECL], and http://sbcl.sf.net[SBCL].) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '.sp -The corresponding implementations are \c -.URL "http://clisp.sf.net" "CLISP" "," -.URL "http://ccl.clozure.com" "Clozure CL" "," -.URL "http://cmucl.org" "CMUCL" "," -.URL "http://ecls.sf.net" "ECL" "," -and -.URL "http://sbcl.sf.net" "SBCL" "."', output.lines.entries[-8..-1].join + The corresponding implementations are http://clisp.sf.net[CLISP], http://ccl.clozure.com[Clozure CL], http://cmucl.org[CMUCL], http://ecls.sf.net[ECL], and http://sbcl.sf.net[SBCL]. + EOS + expected = <<~'EOS'.chop + .sp + The corresponding implementations are \c + .URL "http://clisp.sf.net" "CLISP" "," + .URL "http://ccl.clozure.com" "Clozure CL" "," + .URL "http://cmucl.org" "CMUCL" "," + .URL "http://ecls.sf.net" "ECL" "," + and + .URL "http://sbcl.sf.net" "SBCL" "." + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-8..-1].join end test 'should not insert space between link and non-whitespace characters surrounding it' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} -Please search |link:http://discuss.asciidoctor.org[the forums]| before asking.) - output = Asciidoctor.convert input, :backend => :manpage - assert_equal '.sp -Please search |\c -.URL "http://discuss.asciidoctor.org" "the forums" "|" -before asking.', output.lines.entries[-4..-1].join + Please search |link:http://discuss.asciidoctor.org[the forums]| before asking. + EOS + expected = <<~'EOS'.chop + .sp + Please search |\c + .URL "http://discuss.asciidoctor.org" "the forums" "|" + before asking. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-4..-1].join + end + + test 'should be able to use monospaced text inside a link' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + Enter the link:cat[`cat`] command. + EOS + expected = <<~'EOS'.chop + .sp + Enter the \c + .URL "cat" "\f(CRcat\fP" " " + command. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-4..-1].join + end + end + + context 'MTO macro' do + test 'should convert inline email macro into MTO macro' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + First paragraph. + + mailto:doc@example.org[Contact the doc] + EOS + expected = <<~'EOS'.chop + .sp + First paragraph. + .sp + .MTO "doc\(atexample.org" "Contact the doc" "" + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_equal expected, output.lines[-4..-1].join + end + + test 'should set text of MTO macro to blank for implicit email' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + Bugs fixed daily by doc@example.org. + EOS + expected_coda = <<~'EOS'.chop + Bugs fixed daily by \c + .MTO "doc\(atexample.org" "" "." + EOS + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? 
expected_coda + end + end + + context 'Table' do + test 'should create header, body, and footer rows in correct order' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + [%header%footer] + |=== + |Header + |Body 1 + |Body 2 + |Footer + |=== + EOS + expected_coda = <<~'EOS'.chop + allbox tab(:); + lt. + T{ + .sp + Header + T} + T{ + .sp + Body 1 + T} + T{ + .sp + Body 2 + T} + T{ + .sp + Footer + T} + .TE + .sp + EOS + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? expected_coda + end + + test 'should manify normal table cell content' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + |=== + |*Col A* |_Col B_ + + |*bold* |`mono` + |_italic_ | #mark# + |=== + EOS + output = Asciidoctor.convert input, backend: :manpage + refute_match(/<\/?BOUNDARY>/, output) + end + + test 'should manify table title' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + .Table of options + |=== + | Name | Description | Default + + | dim + | dimension of the object + | 3 + |=== + EOS + expected_coda = <<~'EOS'.chop + .it 1 an-trap + .nr an-no-space-flag 1 + .nr an-break-flag 1 + .br + .B Table 1. Table of options + .TS + allbox tab(:); + lt lt lt. + T{ + .sp + Name + T}:T{ + .sp + Description + T}:T{ + .sp + Default + T} + T{ + .sp + dim + T}:T{ + .sp + dimension of the object + T}:T{ + .sp + 3 + T} + .TE + .sp + EOS + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? expected_coda + end + + test 'should manify and preserve whitespace in literal table cell' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + |=== + |a l|b + c _d_ + . + |=== + EOS + expected_coda = <<~'EOS'.chop + .TS + allbox tab(:); + lt lt. + T{ + .sp + a + T}:T{ + .sp + .nf + b + c _d_ + \&. + .fi + T} + .TE + .sp + EOS + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? expected_coda + end + end + + context 'Images' do + test 'should replace block image with alt text enclosed in square brackets' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + Behold the wisdom of the Magic 8 Ball! + + image::signs-point-to-yes.jpg[] + EOS + + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? %(\n.sp\n[signs point to yes]) + end + + test 'should replace inline image with alt text enclosed in square brackets' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + The Magic 8 Ball says image:signs-point-to-yes.jpg[]. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_includes output, 'The Magic 8 Ball says [signs point to yes].' + end + + test 'should place link after alt text for inline image if link is defined' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + The Magic 8 Ball says image:signs-point-to-yes.jpg[link=https://en.wikipedia.org/wiki/Magic_8-Ball]. + EOS + output = Asciidoctor.convert input, backend: :manpage + assert_includes output, 'The Magic 8 Ball says [signs point to yes] .' + end + + test 'should reference image with title usign styled xref' do + input = <<~EOS.chomp + #{SAMPLE_MANPAGE_HEADER} + + To get your fortune, see <>. + + .Magic 8-Ball + [#magic-8-ball] + image::signs-point-to-yes.jpg[] + EOS + output = Asciidoctor.convert input, backend: :manpage, attributes: { 'xrefstyle' => 'full' } + lines = output.lines.map(&:chomp) + assert_includes lines, 'To get your fortune, see Figure 1, \(lqMagic 8\-Ball\(rq.' + assert_includes lines, '.B Figure 1. 
Magic 8\-Ball' + end + end + + context 'Quote Block' do + test 'should indent quote block' do + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} + + [,James Baldwin] + ____ + Not everything that is faced can be changed. + But nothing can be changed until it is faced. + ____ + EOS + expected_coda = <<~'EOS'.chop + .RS 3 + .ll -.6i + .sp + Not everything that is faced can be changed. + But nothing can be changed until it is faced. + .br + .RE + .ll + .RS 5 + .ll -.10i + \(em James Baldwin + .RE + .ll + EOS + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? expected_coda end end context 'Callout List' do test 'should generate callout list using proper formatting commands' do - input = %(#{SAMPLE_MANPAGE_HEADER} + input = <<~EOS.chop + #{SAMPLE_MANPAGE_HEADER} ----- -$ gem install asciidoctor # <1> ----- -<1> Installs the asciidoctor gem from RubyGems.org) - output = Asciidoctor.convert input, :backend => :manpage - assert output.end_with? '.TS -tab(:); -r lw(\n(.lu*75u/100u). -\fB(1)\fP\h\'-2n\':T{ -Installs the asciidoctor gem from RubyGems.org -T} -.TE' + ---- + $ gem install asciidoctor # <1> + ---- + <1> Installs the asciidoctor gem from RubyGems.org + EOS + expected_coda = <<~'EOS'.chop + .TS + tab(:); + r lw(\n(.lu*75u/100u). + \fB(1)\fP\h'-2n':T{ + Installs the asciidoctor gem from RubyGems.org + T} + .TE + EOS + output = Asciidoctor.convert input, backend: :manpage + assert output.end_with? expected_coda end end @@ -206,13 +736,33 @@ old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = '1234123412' - output = Asciidoctor.convert SAMPLE_MANPAGE_HEADER, :backend => :manpage, :header_footer => true + output = Asciidoctor.convert SAMPLE_MANPAGE_HEADER, backend: :manpage, standalone: true assert_match(/Date: 2009-02-08/, output) assert_match(/^\.TH "COMMAND" "1" "2009-02-08" "Command 1.2.3" "Command Manual"$/, output) ensure - ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch if old_source_date_epoch + if old_source_date_epoch + ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch + else + ENV.delete 'SOURCE_DATE_EPOCH' + end end end - end + test 'should fail if SOURCE_DATE_EPOCH is malformed' do + old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' + begin + ENV['SOURCE_DATE_EPOCH'] = 'aaaaaaaa' + Asciidoctor.convert SAMPLE_MANPAGE_HEADER, backend: :manpage, standalone: true + assert false + rescue + assert true + ensure + if old_source_date_epoch + ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch + else + ENV.delete 'SOURCE_DATE_EPOCH' + end + end + end + end end diff -Nru asciidoctor-1.5.5/test/options_test.rb asciidoctor-2.0.10/test/options_test.rb --- asciidoctor-1.5.5/test/options_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/options_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,12 +1,9 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! 
- require 'test_helper' -end -require 'asciidoctor/cli/options' +# frozen_string_literal: true +require_relative 'test_helper' +require File.join Asciidoctor::LIB_DIR, 'asciidoctor/cli/options' context 'Options' do - test 'should return error code 0 when help flag is present' do + test 'should print usage and return error code 0 when help flag is present' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-h)) assert_equal 0, exitval @@ -14,6 +11,50 @@ end end + test 'should print usage and return error code 0 when help flag is unknown' do + exitval, output = redirect_streams do |out, _| + [Asciidoctor::Cli::Options.parse!(%w(-h unknown)), out.string] + end + assert_equal 0, exitval + assert_match(/^Usage:/, output) + end + + test 'should dump man page and return error code 0 when help topic is manpage' do + exitval, output = redirect_streams do |out, _| + [Asciidoctor::Cli::Options.parse!(%w(-h manpage)), out.string] + end + assert_equal 0, exitval + assert_includes output, 'Manual: Asciidoctor Manual' + assert_includes output, '.TH "ASCIIDOCTOR"' + end + + test 'should an overview of the AsciiDoc syntax and return error code 0 when help topic is syntax' do + exitval, output = redirect_streams do |out, _| + [Asciidoctor::Cli::Options.parse!(%w(-h syntax)), out.string] + end + assert_equal 0, exitval + assert_includes output, '= AsciiDoc Syntax' + assert_includes output, '== Text Formatting' + end + + test 'should print message and return error code 1 when manpage is not found' do + old_manpage_path = ENV['ASCIIDOCTOR_MANPAGE_PATH'] + begin + ENV['ASCIIDOCTOR_MANPAGE_PATH'] = (manpage_path = fixture_path 'no-such-file.1') + redirect_streams do |out, stderr| + exitval = Asciidoctor::Cli::Options.parse!(%w(-h manpage)) + assert_equal 1, exitval + assert_equal %(asciidoctor: FAILED: manual page not found: #{manpage_path}), stderr.string.chomp + end + ensure + if old_manpage_path + ENV['ASCIIDOCTOR_MANPAGE_PATH'] = old_manpage_path + else + ENV.delete 'ASCIIDOCTOR_MANPAGE_PATH' + end + end + end + test 'should return error code 1 when invalid option present' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(--foobar)) @@ -41,86 +82,98 @@ test 'should emit warning when unparsed options remain' do redirect_streams do |stdout, stderr| options = Asciidoctor::Cli::Options.parse!(%w(-b docbook - -)) - assert options.is_a? 
Hash + assert_kind_of Hash, options assert_match(/asciidoctor: WARNING: extra arguments .*/, stderr.string.chomp) end end test 'basic argument assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-v -s -d book test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-w -v -s -d book test/fixtures/sample.adoc)) assert_equal 2, options[:verbose] - assert_equal false, options[:header_footer] + assert_equal false, options[:standalone] assert_equal 'book', options[:attributes]['doctype'] assert_equal 1, options[:input_files].size - assert_equal 'test/fixtures/sample.asciidoc', options[:input_files][0] + assert_equal 'test/fixtures/sample.adoc', options[:input_files][0] end test 'standard attribute assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-a docinfosubs=attributes,replacements -a icons test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-a docinfosubs=attributes,replacements -a icons test/fixtures/sample.adoc)) assert_equal 'attributes,replacements', options[:attributes]['docinfosubs'] assert_equal '', options[:attributes]['icons'] end test 'multiple attribute arguments' do - options = Asciidoctor::Cli::Options.parse!(%w(-a imagesdir=images -a icons test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-a imagesdir=images -a icons test/fixtures/sample.adoc)) assert_equal 'images', options[:attributes]['imagesdir'] assert_equal '', options[:attributes]['icons'] end test 'should only split attribute key/value pairs on first equal sign' do - options = Asciidoctor::Cli::Options.parse!(%w(-a name=value=value test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-a name=value=value test/fixtures/sample.adoc)) assert_equal 'value=value', options[:attributes]['name'] end + test 'should not fail if value of attribute option is empty' do + options = Asciidoctor::Cli::Options.parse!(['-a', '', 'test/fixtures/sample.adoc']) + + assert_nil options[:attributes] + end + + test 'should not fail if value of attribute option is equal sign' do + options = Asciidoctor::Cli::Options.parse!(['-a', '=', 'test/fixtures/sample.adoc']) + + assert_nil options[:attributes] + end + test 'should allow safe mode to be specified' do - options = Asciidoctor::Cli::Options.parse!(%w(-S safe test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-S safe test/fixtures/sample.adoc)) assert_equal Asciidoctor::SafeMode::SAFE, options[:safe] end test 'should allow any backend to be specified' do - options = Asciidoctor::Cli::Options.parse!(%w(-b my_custom_backend test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-b my_custom_backend test/fixtures/sample.adoc)) assert_equal 'my_custom_backend', options[:attributes]['backend'] end test 'article doctype assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-d article test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-d article test/fixtures/sample.adoc)) assert_equal 'article', options[:attributes]['doctype'] end test 'book doctype assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-d book test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-d book test/fixtures/sample.adoc)) assert_equal 'book', options[:attributes]['doctype'] end test 'inline doctype assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-d inline test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-d inline 
test/fixtures/sample.adoc)) assert_equal 'inline', options[:attributes]['doctype'] end test 'template engine assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-E haml test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-E haml test/fixtures/sample.adoc)) assert_equal 'haml', options[:template_engine] end test 'template directory assignment' do - options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend test/fixtures/sample.adoc)) assert_equal ['custom-backend'], options[:template_dirs] end test 'multiple template directory assignments' do - options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend -T custom-backend-hacks test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend -T custom-backend-hacks test/fixtures/sample.adoc)) assert_equal ['custom-backend', 'custom-backend-hacks'], options[:template_dirs] end test 'multiple -r flags requires specified libraries' do options = Asciidoctor::Cli::Options.new redirect_streams do |stdout, stderr| - exitval = options.parse! %w(-r foobar -r foobaz test/fixtures/sample.asciidoc) + exitval = options.parse! %w(-r foobar -r foobaz test/fixtures/sample.adoc) assert_match(%(asciidoctor: FAILED: 'foobar' could not be loaded), stderr.string) assert_equal 1, exitval assert_equal ['foobar', 'foobaz'], options[:requires] @@ -130,7 +183,7 @@ test '-r flag with multiple values requires specified libraries' do options = Asciidoctor::Cli::Options.new redirect_streams do |stdout, stderr| - exitval = options.parse! %w(-r foobar,foobaz test/fixtures/sample.asciidoc) + exitval = options.parse! %w(-r foobar,foobaz test/fixtures/sample.adoc) assert_match(%(asciidoctor: FAILED: 'foobar' could not be loaded), stderr.string) assert_equal 1, exitval assert_equal ['foobar', 'foobaz'], options[:requires] @@ -141,7 +194,7 @@ options = Asciidoctor::Cli::Options.new old_load_path = $LOAD_PATH.dup begin - exitval = options.parse! %w(-I foobar -I foobaz test/fixtures/sample.asciidoc) + exitval = options.parse! %w(-I foobar -I foobaz test/fixtures/sample.adoc) refute_equal 1, exitval assert_equal old_load_path.size + 2, $LOAD_PATH.size assert_equal File.expand_path('foobar'), $LOAD_PATH[0] @@ -156,7 +209,7 @@ options = Asciidoctor::Cli::Options.new old_load_path = $LOAD_PATH.dup begin - exitval = options.parse! %W(-I foobar#{File::PATH_SEPARATOR}foobaz test/fixtures/sample.asciidoc) + exitval = options.parse! %W(-I foobar#{File::PATH_SEPARATOR}foobaz test/fixtures/sample.adoc) refute_equal 1, exitval assert_equal old_load_path.size + 2, $LOAD_PATH.size assert_equal File.expand_path('foobar'), $LOAD_PATH[0] @@ -167,33 +220,65 @@ end end + test 'should set failure level to FATAL by default' do + options = Asciidoctor::Cli::Options.parse! 
%W(test/fixtures/sample.adoc) + assert_equal ::Logger::Severity::FATAL, options[:failure_level] + end + + test 'should allow failure level to be set to WARN' do + %w(w warn WARN warning WARNING).each do |val| + options = Asciidoctor::Cli::Options.parse!(%W(--failure-level=#{val} test/fixtures/sample.adoc)) + assert_equal ::Logger::Severity::WARN, options[:failure_level] + end + end + + test 'should allow failure level to be set to ERROR' do + %w(e err ERR error ERROR).each do |val| + options = Asciidoctor::Cli::Options.parse!(%W(--failure-level=#{val} test/fixtures/sample.adoc)) + assert_equal ::Logger::Severity::ERROR, options[:failure_level] + end + end + + test 'should not allow failure level to be set to unknown value' do + exit_code, messages = redirect_streams do |_, err| + [(Asciidoctor::Cli::Options.parse! %W(--failure-level=foobar test/fixtures/sample.adoc)), err.string] + end + assert_equal 1, exit_code + assert_includes messages, 'invalid argument: --failure-level=foobar' + end + test 'should set verbose to 2 when -v flag is specified' do - options = Asciidoctor::Cli::Options.parse!(%w(-v test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-v test/fixtures/sample.adoc)) assert_equal 2, options[:verbose] end test 'should set verbose to 0 when -q flag is specified' do - options = Asciidoctor::Cli::Options.parse!(%w(-q test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-q test/fixtures/sample.adoc)) assert_equal 0, options[:verbose] end test 'should set verbose to 2 when -v flag is specified after -q flag' do - options = Asciidoctor::Cli::Options.parse!(%w(-q -v test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-q -v test/fixtures/sample.adoc)) assert_equal 2, options[:verbose] end test 'should set verbose to 0 when -q flag is specified after -v flag' do - options = Asciidoctor::Cli::Options.parse!(%w(-v -q test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-v -q test/fixtures/sample.adoc)) assert_equal 0, options[:verbose] end + test 'should enable warnings when -w flag is specified' do + options = Asciidoctor::Cli::Options.parse!(%w(-w test/fixtures/sample.adoc)) + assert options[:warnings] + end + test 'should enable timings when -t flag is specified' do - options = Asciidoctor::Cli::Options.parse!(%w(-t test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(-t test/fixtures/sample.adoc)) assert_equal true, options[:timings] end test 'timings option is disable by default' do - options = Asciidoctor::Cli::Options.parse!(%w(test/fixtures/sample.asciidoc)) + options = Asciidoctor::Cli::Options.parse!(%w(test/fixtures/sample.adoc)) assert_equal false, options[:timings] end diff -Nru asciidoctor-1.5.5/test/paragraphs_test.rb asciidoctor-2.0.10/test/paragraphs_test.rb --- asciidoctor-1.5.5/test/paragraphs_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/paragraphs_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,31 +1,28 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Paragraphs' do context 'Normal' do test 'should treat plain text separated by blank lines as paragraphs' do - input = <<-EOS -Plain text for the win! + input = <<~'EOS' + Plain text for the win! -Yep. Text. Plain and simple. + Yep. Text. Plain and simple. 
EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'p', output, 2 assert_xpath '(//p)[1][text() = "Plain text for the win!"]', output, 1 assert_xpath '(//p)[2][text() = "Yep. Text. Plain and simple."]', output, 1 end test 'should associate block title with paragraph' do - input = <<-EOS -.Titled -Paragraph. + input = <<~'EOS' + .Titled + Paragraph. -Winning. + Winning. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'p', output, 2 assert_xpath '(//p)[1]/preceding-sibling::*[@class = "title"]', output, 1 @@ -34,152 +31,153 @@ end test 'no duplicate block before next section' do - input = <<-EOS -= Title + input = <<~'EOS' + = Title -Preamble + Preamble -== First Section + == First Section -Paragraph 1 + Paragraph 1 -Paragraph 2 + Paragraph 2 -== Second Section + == Second Section -Last words + Last words EOS - output = render_string input + output = convert_string input assert_xpath '//p[text() = "Paragraph 2"]', output, 1 end test 'does not treat wrapped line as a list item' do - input = <<-EOS -paragraph -. wrapped line + input = <<~'EOS' + paragraph + . wrapped line EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'p', output, 1 assert_xpath %(//p[text()="paragraph\n. wrapped line"]), output, 1 end test 'does not treat wrapped line as a block title' do - input = <<-EOS -paragraph -.wrapped line + input = <<~'EOS' + paragraph + .wrapped line EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'p', output, 1 assert_xpath %(//p[text()="paragraph\n.wrapped line"]), output, 1 end test 'interprets normal paragraph style as normal paragraph' do - input = <<-EOS -[normal] -Normal paragraph. -Nothing special. + input = <<~'EOS' + [normal] + Normal paragraph. + Nothing special. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'p', output, 1 end test 'removes indentation from literal paragraph marked as normal' do - input = <<-EOS -[normal] - Normal paragraph. - Nothing special. - Last line. + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [normal] + Normal paragraph. + Nothing special. + Last line. 
EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'p', output, 1 assert_xpath %(//p[text()="Normal paragraph.\n Nothing special.\nLast line."]), output, 1 end test 'normal paragraph terminates at block attribute list' do - input = <<-EOS -normal text -[literal] -literal text + input = <<~'EOS' + normal text + [literal] + literal text EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.paragraph:root', output, 1 assert_css '.literalblock:root', output, 1 end test 'normal paragraph terminates at block delimiter' do - input = <<-EOS -normal text --- -text in open block --- + input = <<~'EOS' + normal text + -- + text in open block + -- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.paragraph:root', output, 1 assert_css '.openblock:root', output, 1 end test 'normal paragraph terminates at list continuation' do - input = <<-EOS -normal text -+ + input = <<~'EOS' + normal text + + EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.paragraph:root', output, 2 assert_xpath %((/*[@class="paragraph"])[1]/p[text() = "normal text"]), output, 1 assert_xpath %((/*[@class="paragraph"])[2]/p[text() = "+"]), output, 1 end test 'normal style turns literal paragraph into normal paragraph' do - input = <<-EOS -[normal] - normal paragraph, - despite the leading indent + input = <<~'EOS' + [normal] + normal paragraph, + despite the leading indent EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.paragraph:root > p', output, 1 end - test 'expands index term macros in DocBook backend' do - input = <<-EOS -Here is an index entry for ((tigers)). -indexterm:[Big cats,Tigers,Siberian Tiger] -Here is an index entry for indexterm2:[Linux]. -(((Operating Systems,Linux,Fedora))) -Note that multi-entry terms generate separate index entries. + test 'automatically promotes index terms in DocBook output if indexterm-promotion-option is set' do + input = <<~'EOS' + Here is an index entry for ((tigers)). + indexterm:[Big cats,Tigers,Siberian Tiger] + Here is an index entry for indexterm2:[Linux]. + (((Operating Systems,Linux,Fedora))) + Note that multi-entry terms generate separate index entries. 
EOS - output = render_embedded_string input, :attributes => {'backend' => 'docbook45'} + output = convert_string_to_embedded input, backend: 'docbook', attributes: { 'indexterm-promotion-option' => '' } assert_xpath '/simpara', output, 1 - term1 = (xmlnodes_at_xpath '(//indexterm)[1]', output, 1).first - assert_equal 'tigers', term1.to_s + term1 = xmlnodes_at_xpath '(//indexterm)[1]', output, 1 + assert_equal %(\ntigers\n), term1.to_s assert term1.next.content.start_with?('tigers') - term2 = (xmlnodes_at_xpath '(//indexterm)[2]', output, 1).first + term2 = xmlnodes_at_xpath '(//indexterm)[2]', output, 1 term2_elements = term2.elements assert_equal 3, term2_elements.size assert_equal 'Big cats', term2_elements[0].to_s assert_equal 'Tigers', term2_elements[1].to_s assert_equal 'Siberian Tiger', term2_elements[2].to_s - term3 = (xmlnodes_at_xpath '(//indexterm)[3]', output, 1).first + term3 = xmlnodes_at_xpath '(//indexterm)[3]', output, 1 term3_elements = term3.elements assert_equal 2, term3_elements.size assert_equal 'Tigers', term3_elements[0].to_s assert_equal 'Siberian Tiger', term3_elements[1].to_s - term4 = (xmlnodes_at_xpath '(//indexterm)[4]', output, 1).first + term4 = xmlnodes_at_xpath '(//indexterm)[4]', output, 1 term4_elements = term4.elements assert_equal 1, term4_elements.size assert_equal 'Siberian Tiger', term4_elements[0].to_s - term5 = (xmlnodes_at_xpath '(//indexterm)[5]', output, 1).first - assert_equal 'Linux', term5.to_s + term5 = xmlnodes_at_xpath '(//indexterm)[5]', output, 1 + assert_equal %(\nLinux\n), term5.to_s assert term5.next.content.start_with?('Linux') assert_xpath '(//indexterm)[6]/*', output, 3 @@ -187,144 +185,187 @@ assert_xpath '(//indexterm)[8]/*', output, 1 end + test 'does not automatically promote index terms in DocBook output if indexterm-promotion-option is not set' do + input = <<~'EOS' + The Siberian Tiger is one of the biggest living cats. + indexterm:[Big cats,Tigers,Siberian Tiger] + EOS + + output = convert_string_to_embedded input, backend: 'docbook' + + assert_css 'indexterm', output, 1 + + term1 = xmlnodes_at_css 'indexterm', output, 1 + term1_elements = term1.elements + assert_equal 3, term1_elements.size + assert_equal 'Big cats', term1_elements[0].to_s + assert_equal 'Tigers', term1_elements[1].to_s + assert_equal 'Siberian Tiger', term1_elements[2].to_s + end + test 'normal paragraph should honor explicit subs list' do - input = <<-EOS -[subs="specialcharacters"] -** + input = <<~'EOS' + [subs="specialcharacters"] + ** EOS - output = render_embedded_string input - assert output.include?('*<Hey Jude>*') + output = convert_string_to_embedded input + assert_includes output, '*<Hey Jude>*' end test 'normal paragraph should honor specialchars shorthand' do - input = <<-EOS -[subs="specialchars"] -** + input = <<~'EOS' + [subs="specialchars"] + ** EOS - output = render_embedded_string input - assert output.include?('*<Hey Jude>*') + output = convert_string_to_embedded input + assert_includes output, '*<Hey Jude>*' end test 'should add a hardbreak at end of each line when hardbreaks option is set' do - input = <<-EOS -[%hardbreaks] -read -my -lips + input = <<~'EOS' + [%hardbreaks] + read + my + lips EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'br', output, 2 assert_xpath '//p', output, 1 - assert output.include?("

<div class=\"paragraph\">\n<p>read<br>\nmy<br>\nlips</p>\n</div>") + assert_includes output, "<div class=\"paragraph\">\n<p>read<br>\nmy<br>\nlips</p>\n</div>

    " + end + + test 'should be able to toggle hardbreaks by setting hardbreaks-option on document' do + input = <<~'EOS' + :hardbreaks-option: + + make + it + so + + :!hardbreaks: + + roll it back + EOS + + output = convert_string_to_embedded input + assert_xpath '(//p)[1]/br', output, 2 + assert_xpath '(//p)[2]/br', output, 0 end end context 'Literal' do test 'single-line literal paragraphs' do - input = <<-EOS - LITERALS + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + you know what? - ARE LITERALLY + LITERALS - AWESOME! + ARE LITERALLY + + AWESOME! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//pre', output, 3 end test 'multi-line literal paragraph' do - input = <<-EOS -Install instructions: + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + Install instructions: - yum install ruby rubygems - gem install asciidoctor + yum install ruby rubygems + gem install asciidoctor -You're good to go! + You're good to go! EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//pre', output, 1 # indentation should be trimmed from literal block assert_xpath %(//pre[text() = "yum install ruby rubygems\ngem install asciidoctor"]), output, 1 end test 'literal paragraph' do - input = <<-EOS -[literal] -this text is literally literal + input = <<~'EOS' + [literal] + this text is literally literal EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]//pre[text()="this text is literally literal"]), output, 1 end test 'should read content below literal style verbatim' do - input = <<-EOS -[literal] -image::not-an-image-block[] + input = <<~'EOS' + [literal] + image::not-an-image-block[] EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]//pre[text()="image::not-an-image-block[]"]), output, 1 assert_css 'img', output, 0 end test 'listing paragraph' do - input = <<-EOS -[listing] -this text is a listing + input = <<~'EOS' + [listing] + this text is a listing EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="listingblock"]//pre[text()="this text is a listing"]), output, 1 end test 'source paragraph' do - input = <<-EOS -[source] -use the source, luke! + input = <<~'EOS' + [source] + use the source, luke! 
EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[text()="use the source, luke!"]), output, 1 end test 'source code paragraph with language' do - input = <<-EOS -[source, perl] -die 'zomg perl sucks'; + input = <<~'EOS' + [source, perl] + die 'zomg perl is tough'; EOS - output = render_embedded_string input - assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[@class="language-perl"][@data-lang="perl"][text()="die 'zomg perl sucks';"]), output, 1 + output = convert_string_to_embedded input + assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[@class="language-perl"][@data-lang="perl"][text()="die 'zomg perl is tough';"]), output, 1 end test 'literal paragraph terminates at block attribute list' do - input = <<-EOS - literal text -[normal] -normal text + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + literal text + [normal] + normal text EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="paragraph"]), output, 1 end test 'literal paragraph terminates at block delimiter' do - input = <<-EOS - literal text --- -normal text --- + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + literal text + -- + normal text + -- EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="openblock"]), output, 1 end test 'literal paragraph terminates at list continuation' do - input = <<-EOS - literal text -+ + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + literal text + + EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="literalblock"]//pre[text() = "literal text"]), output, 1 assert_xpath %(/*[@class="paragraph"]), output, 1 @@ -334,30 +375,30 @@ context 'Quote' do test "single-line quote paragraph" do - input = <<-EOS -[quote] -Famous quote. + input = <<~'EOS' + [quote] + Famous quote. EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class = "quoteblock"]', output, 1 assert_xpath '//*[@class = "quoteblock"]//p', output, 0 assert_xpath '//*[@class = "quoteblock"]//*[contains(text(), "Famous quote.")]', output, 1 end test 'quote paragraph terminates at list continuation' do - input = <<-EOS -[quote] -A famouse quote. -+ + input = <<~'EOS' + [quote] + A famouse quote. 
+ + EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css '.quoteblock:root', output, 1 assert_css '.paragraph:root', output, 1 assert_xpath %(/*[@class="paragraph"]/p[text() = "+"]), output, 1 end test "verse paragraph" do - output = render_string("[verse]\nFamous verse.") + output = convert_string("[verse]\nFamous verse.") assert_xpath '//*[@class = "verseblock"]', output, 1 assert_xpath '//*[@class = "verseblock"]/pre', output, 1 assert_xpath '//*[@class = "verseblock"]//p', output, 0 @@ -365,130 +406,156 @@ end test 'should perform normal subs on a verse paragraph' do - input = <<-EOS -[verse] -_GET /groups/link:#group-id[\{group-id\}]_ + input = <<~'EOS' + [verse] + _GET /groups/link:#group-id[\{group-id\}]_ EOS - output = render_embedded_string input - assert output.include?('
<pre class="content"><em>GET /groups/<a href="#group-id">{group-id}</a></em></pre>') + output = convert_string_to_embedded input + assert_includes output, '<pre class="content"><em>GET /groups/<a href="#group-id">{group-id}</a></em></pre>
    ' end test 'quote paragraph should honor explicit subs list' do - input = <<-EOS -[subs="specialcharacters"] -[quote] -*Hey Jude* + input = <<~'EOS' + [subs="specialcharacters"] + [quote] + *Hey Jude* EOS - output = render_embedded_string input - assert output.include?('*Hey Jude*') + output = convert_string_to_embedded input + assert_includes output, '*Hey Jude*' end end context "special" do test "note multiline syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| - assert_xpath "//div[@class='admonitionblock #{style.downcase}']", render_string("[#{style}]\nThis is a winner.") + assert_xpath "//div[@class='admonitionblock #{style.downcase}']", convert_string("[#{style}]\nThis is a winner.") end end test "note block syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| - assert_xpath "//div[@class='admonitionblock #{style.downcase}']", render_string("[#{style}]\n====\nThis is a winner.\n====") + assert_xpath "//div[@class='admonitionblock #{style.downcase}']", convert_string("[#{style}]\n====\nThis is a winner.\n====") end end test "note inline syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| - assert_xpath "//div[@class='admonitionblock #{style.downcase}']", render_string("#{style}: This is important, fool!") + assert_xpath "//div[@class='admonitionblock #{style.downcase}']", convert_string("#{style}: This is important, fool!") end end - test "sidebar block" do - input = <<-EOS -== Section - -.Sidebar -**** -Content goes here -**** + test 'should process preprocessor conditional in paragraph content' do + input = <<~'EOS' + ifdef::asciidoctor-version[] + [sidebar] + First line of sidebar. + ifdef::backend[The backend is {backend}.] + Last line of sidebar. + endif::[] + EOS + + expected = <<~'EOS'.chop +
<div class="sidebarblock"> + <div class="content"> + First line of sidebar. + The backend is html5. + Last line of sidebar. + </div> + </div> +
    EOS - result = render_string(input) - assert_xpath "//*[@class='sidebarblock']//p", result, 1 + + result = convert_string_to_embedded input + assert_equal expected, result end context 'Styled Paragraphs' do - test 'should wrap text in simpara for styled paragraphs when rendered to DocBook' do - input = <<-EOS -= Book -:doctype: book + test 'should wrap text in simpara for styled paragraphs when converted to DocBook' do + input = <<~'EOS' + = Book + :doctype: book + + [preface] + = About this book -[preface] -= About this book + [abstract] + An abstract for the book. -[abstract] -An abstract for the book. + = Part 1 -= Part 1 + [partintro] + An intro to this part. -[partintro] -An intro to this part. + == Chapter 1 -== Chapter 1 + [sidebar] + Just a side note. -[sidebar] -Just a side note. + [example] + As you can see here. -[example] -As you can see here. + [quote] + Wise words from a wise person. -[quote] -Wise words from a wise person. + [open] + Make it what you want. EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'abstract > simpara', output, 1 assert_css 'partintro > simpara', output, 1 assert_css 'sidebar > simpara', output, 1 assert_css 'informalexample > simpara', output, 1 assert_css 'blockquote > simpara', output, 1 + assert_css 'chapter > simpara', output, 1 end - test 'should wrap text in simpara for styled paragraphs with title when rendered to DocBook' do - input = <<-EOS -= Book -:doctype: book + test 'should convert open paragraph to open block' do + input = <<~'EOS' + [open] + Make it what you want. + EOS + + output = convert_string_to_embedded input + assert_css '.openblock', output, 1 + assert_css '.openblock p', output, 0 + end -[preface] -= About this book + test 'should wrap text in simpara for styled paragraphs with title when converted to DocBook' do + input = <<~'EOS' + = Book + :doctype: book -[abstract] -.Abstract title -An abstract for the book. + [preface] + = About this book -= Part 1 + [abstract] + .Abstract title + An abstract for the book. -[partintro] -.Part intro title -An intro to this part. + = Part 1 -== Chapter 1 + [partintro] + .Part intro title + An intro to this part. -[sidebar] -.Sidebar title -Just a side note. + == Chapter 1 -[example] -.Example title -As you can see here. + [sidebar] + .Sidebar title + Just a side note. -[quote] -.Quote title -Wise words from a wise person. + [example] + .Example title + As you can see here. + + [quote] + .Quote title + Wise words from a wise person. EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_css 'abstract > title', output, 1 assert_xpath '//abstract/title[text() = "Abstract title"]', output, 1 assert_css 'abstract > title + simpara', output, 1 @@ -510,14 +577,41 @@ context 'Inline doctype' do test 'should only format and output text in first paragraph when doctype is inline' do input = "http://asciidoc.org[AsciiDoc] is a _lightweight_ markup language...\n\nignored" - output = render_string input, :doctype => 'inline' + output = convert_string input, doctype: 'inline' assert_equal 'AsciiDoc is a lightweight markup language…​', output end - test 'should output nil if first block is not a paragraph' do + test 'should output nil and warn if first block is not a paragraph' do input = '* bullet' - output = render_string input, :doctype => 'inline' - assert output.nil? 
+ using_memory_logger do |logger| + output = convert_string input, doctype: 'inline' + assert_nil output + assert_message logger, :WARN, '~no inline candidate' + end + end + end + end + + context 'Custom' do + test 'should not warn if paragraph style is unregisted' do + input = <<~'EOS' + [foo] + bar + EOS + using_memory_logger do |logger| + convert_string_to_embedded input + assert_empty logger.messages + end + end + + test 'should log debug message if paragraph style is unknown and debug level is enabled' do + input = <<~'EOS' + [foo] + bar + EOS + using_memory_logger Logger::Severity::DEBUG do |logger| + convert_string_to_embedded input + assert_message logger, :DEBUG, ': line 2: unknown style for paragraph: foo', Hash end end end diff -Nru asciidoctor-1.5.5/test/parser_test.rb asciidoctor-2.0.10/test/parser_test.rb --- asciidoctor-1.5.5/test/parser_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/parser_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,11 +1,7 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context "Parser" do - test "is_section_title?" do assert Asciidoctor::Parser.is_section_title?('AsciiDoc Home Page', '==================') assert Asciidoctor::Parser.is_section_title?('=== AsciiDoc Home Page') @@ -17,194 +13,89 @@ assert_equal 'foo3-bar', Asciidoctor::Parser.sanitize_attribute_name("Foo 3^ # - Bar[") end - test "collect unnamed attribute" do - attributes = {} - line = 'quote' - expected = {1 => 'quote'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attribute double-quoted" do - attributes = {} - line = '"quote"' - expected = {1 => 'quote'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect empty unnamed attribute double-quoted" do - attributes = {} - line = '""' - expected = {1 => ''} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attribute double-quoted containing escaped quote" do - attributes = {} - line = '"ba\"zaar"' - expected = {1 => 'ba"zaar'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attribute single-quoted" do - attributes = {} - line = '\'quote\'' - expected = {1 => 'quote'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect empty unnamed attribute single-quoted" do - attributes = {} - line = '\'\'' - expected = {1 => ''} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attribute single-quoted containing escaped quote" do - attributes = {} - line = '\'ba\\\'zaar\'' - expected = {1 => 'ba\'zaar'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attribute with dangling delimiter" do - attributes = {} - line = 'quote , ' - expected = {1 => 'quote'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attribute in second position after empty attribute" do - attributes = {} - line = ', John Smith' - expected = {1 => nil, 2 => 'John Smith'} - 
Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect unnamed attributes" do - attributes = {} - line = "first, second one, third" - expected = {1 => 'first', 2 => 'second one', 3 => 'third'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect named attribute" do - attributes = {} - line = 'foo=bar' - expected = {'foo' => 'bar'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect named attribute double-quoted" do - attributes = {} - line = 'foo="bar"' - expected = {'foo' => 'bar'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test 'collect named attribute with double-quoted empty value' do - attributes = {} - line = 'height=100,caption="",link="images/octocat.png"' - expected = {'height' => '100', 'caption' => '', 'link' => 'images/octocat.png'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect named attribute single-quoted" do - attributes = {} - line = 'foo=\'bar\'' - expected = {'foo' => 'bar'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test 'collect named attribute with single-quoted empty value' do - attributes = {} - line = "height=100,caption='',link='images/octocat.png'" - expected = {'height' => '100', 'caption' => '', 'link' => 'images/octocat.png'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect named attributes unquoted" do - attributes = {} - line = "first=value, second=two, third=3" - expected = {'first' => 'value', 'second' => 'two', 'third' => '3'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect named attributes quoted" do - attributes = {} - line = "first='value', second=\"value two\", third=three" - expected = {'first' => 'value', 'second' => 'value two', 'third' => 'three'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect named attributes quoted containing non-semantic spaces" do - attributes = {} - line = " first = 'value', second =\"value two\" , third= three " - expected = {'first' => 'value', 'second' => 'value two', 'third' => 'three'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect mixed named and unnamed attributes" do - attributes = {} - line = "first, second=\"value two\", third=three, Sherlock Holmes" - expected = {1 => 'first', 'second' => 'value two', 'third' => 'three', 4 => 'Sherlock Holmes'} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect options attribute" do - attributes = {} - line = "quote, options='opt1,opt2 , opt3'" - expected = {1 => 'quote', 'options' => 'opt1,opt2 , opt3', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => ''} - Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect opts attribute as options" do - attributes = {} - line = "quote, opts='opt1,opt2 , opt3'" - expected = {1 => 'quote', 'options' => 'opt1,opt2 , opt3', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => ''} - 
Asciidoctor::AttributeList.new(line).parse_into(attributes) - assert_equal expected, attributes - end - - test "collect and rekey unnamed attributes" do - attributes = {} - line = "first, second one, third, fourth" - expected = {1 => 'first', 2 => 'second one', 3 => 'third', 4 => 'fourth', 'a' => 'first', 'b' => 'second one', 'c' => 'third'} - Asciidoctor::AttributeList.new(line).parse_into(attributes, ['a', 'b', 'c']) - assert_equal expected, attributes - end - - test "rekey positional attributes" do - attributes = {1 => 'source', 2 => 'java'} - expected = {1 => 'source', 2 => 'java', 'style' => 'source', 'language' => 'java'} - Asciidoctor::AttributeList.rekey(attributes, ['style', 'language', 'linenums']) - assert_equal expected, attributes + test 'store attribute with value' do + attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', 'bar' + assert_equal 'foo', attr_name + assert_equal 'bar', attr_value + end + + test 'store attribute with negated value' do + { 'foo!' => nil, '!foo' => nil, 'foo' => nil }.each do |name, value| + attr_name, attr_value = Asciidoctor::Parser.store_attribute name, value + assert_equal name.sub('!', ''), attr_name + assert_nil attr_value + end + end + + test 'store accessible attribute on document with value' do + doc = empty_document + doc.set_attribute 'foo', 'baz' + attrs = {} + attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', 'bar', doc, attrs + assert_equal 'foo', attr_name + assert_equal 'bar', attr_value + assert_equal 'bar', (doc.attr 'foo') + assert attrs.key?(:attribute_entries) + assert_equal 1, attrs[:attribute_entries].size + assert_equal 'foo', attrs[:attribute_entries][0].name + assert_equal 'bar', attrs[:attribute_entries][0].value + end + + test 'store accessible attribute on document with value that contains attribute reference' do + doc = empty_document + doc.set_attribute 'foo', 'baz' + doc.set_attribute 'release', 'ultramega' + attrs = {} + attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', '{release}', doc, attrs + assert_equal 'foo', attr_name + assert_equal 'ultramega', attr_value + assert_equal 'ultramega', (doc.attr 'foo') + assert attrs.key?(:attribute_entries) + assert_equal 1, attrs[:attribute_entries].size + assert_equal 'foo', attrs[:attribute_entries][0].name + assert_equal 'ultramega', attrs[:attribute_entries][0].value + end + + test 'store inaccessible attribute on document with value' do + doc = empty_document attributes: { 'foo' => 'baz' } + attrs = {} + attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', 'bar', doc, attrs + assert_equal 'foo', attr_name + assert_equal 'bar', attr_value + assert_equal 'baz', (doc.attr 'foo') + refute attrs.key?(:attribute_entries) + end + + test 'store accessible attribute on document with negated value' do + { 'foo!' => nil, '!foo' => nil, 'foo' => nil }.each do |name, value| + doc = empty_document + doc.set_attribute 'foo', 'baz' + attrs = {} + attr_name, attr_value = Asciidoctor::Parser.store_attribute name, value, doc, attrs + assert_equal name.sub('!', ''), attr_name + assert_nil attr_value + assert attrs.key?(:attribute_entries) + assert_equal 1, attrs[:attribute_entries].size + assert_equal 'foo', attrs[:attribute_entries][0].name + assert_nil attrs[:attribute_entries][0].value + end + end + + test 'store inaccessible attribute on document with negated value' do + { 'foo!' 
=> nil, '!foo' => nil, 'foo' => nil }.each do |name, value| + doc = empty_document attributes: { 'foo' => 'baz' } + attrs = {} + attr_name, attr_value = Asciidoctor::Parser.store_attribute name, value, doc, attrs + assert_equal name.sub('!', ''), attr_name + assert_nil attr_value + refute attrs.key?(:attribute_entries) + end end test 'parse style attribute with id and role' do - attributes = {1 => 'style#id.role'} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => 'style#id.role' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style - assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] @@ -212,23 +103,21 @@ end test 'parse style attribute with style, role, id and option' do - attributes = {1 => 'style.role#id%fragment'} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => 'style.role#id%fragment' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style - assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal '', attributes['fragment-option'] - assert_equal 'fragment', attributes['options'] assert_equal 'style.role#id%fragment', attributes[1] + refute attributes.key? 'options' end test 'parse style attribute with style, id and multiple roles' do - attributes = {1 => 'style#id.role1.role2'} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => 'style#id.role1.role2' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style - assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role1 role2', attributes['role'] @@ -236,10 +125,9 @@ end test 'parse style attribute with style, multiple roles and id' do - attributes = {1 => 'style.role1.role2#id'} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => 'style.role1.role2#id' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style - assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role1 role2', attributes['role'] @@ -247,40 +135,35 @@ end test 'parse style attribute with positional and original style' do - attributes = {1 => 'new_style', 'style' => 'original_style'} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => 'new_style', 'style' => 'original_style' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'new_style', style - assert_equal 'original_style', original_style assert_equal 'new_style', attributes['style'] assert_equal 'new_style', attributes[1] end test 'parse style attribute with id and role only' do - attributes = {1 => '#id.role'} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => '#id.role' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style - assert_nil original_style assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal '#id.role', attributes[1] end test 'parse empty style attribute' do - attributes = {1 => nil} - style, original_style = 
Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => nil } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style - assert_nil original_style assert_nil attributes['id'] assert_nil attributes['role'] assert_nil attributes[1] end test 'parse style attribute with option should preserve existing options' do - attributes = {1 => '%header', 'options' => 'footer', 'footer-option' => ''} - style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) + attributes = { 1 => '%header', 'footer-option' => '' } + style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style - assert_nil original_style - assert_equal 'header,footer', attributes['options'] assert_equal '', attributes['header-option'] assert_equal '', attributes['footer-option'] end @@ -410,7 +293,7 @@ assert_equal 'Stéphane', metadata['firstname'] assert_equal 'Brontë', metadata['lastname'] assert_equal 'SB', metadata['authorinitials'] - end if ::RUBY_MIN_VERSION_1_9 + end test 'parse ideographic author names' do metadata, _ = parse_header_metadata '李 四 ' @@ -422,10 +305,10 @@ assert_equal '四', metadata['lastname'] assert_equal 'si.li@example.com', metadata['email'] assert_equal '李四', metadata['authorinitials'] - end if ::RUBY_MIN_VERSION_1_9 + end test "parse author condenses whitespace" do - metadata, _ = parse_header_metadata ' Stuart Rackham ' + metadata, _ = parse_header_metadata 'Stuart Rackham ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham', metadata['author'] @@ -455,10 +338,96 @@ assert_equal 'John Smith', metadata['author_2'] end + test 'should not parse multiple authors if semi-colon is not followed by space' do + metadata, _ = parse_header_metadata 'Joe Doe;Smith Johnson' + assert_equal 1, metadata['authorcount'] + end + + test 'skips blank author entries in implicit author line' do + metadata, _ = parse_header_metadata 'Doc Writer; ; John Smith ;' + assert_equal 2, metadata['authorcount'] + assert_equal 'Doc Writer', metadata['author_1'] + assert_equal 'John Smith', metadata['author_2'] + end + + test 'parse name with more than 3 parts in author attribute' do + doc = empty_document + parse_header_metadata ':author: Leroy Harold Scherer, Jr.', doc + assert_equal 'Leroy Harold Scherer, Jr.', doc.attributes['author'] + assert_equal 'Leroy', doc.attributes['firstname'] + assert_equal 'Harold', doc.attributes['middlename'] + assert_equal 'Scherer, Jr.', doc.attributes['lastname'] + end + + test 'use explicit authorinitials if set after implicit author line' do + input = <<~'EOS' + Jean-Claude Van Damme + :authorinitials: JCVD + EOS + doc = empty_document + parse_header_metadata input, doc + assert_equal 'JCVD', doc.attributes['authorinitials'] + end + + test 'use explicit authorinitials if set after author attribute' do + input = <<~'EOS' + :author: Jean-Claude Van Damme + :authorinitials: JCVD + EOS + doc = empty_document + parse_header_metadata input, doc + assert_equal 'JCVD', doc.attributes['authorinitials'] + end + + test 'sets authorcount to 0 if document has no authors' do + input = '' + doc = empty_document + metadata, _ = parse_header_metadata input, doc + assert_equal 0, doc.attributes['authorcount'] + assert_equal 0, metadata['authorcount'] + end + + test 'does not drop name joiner when using multiple authors' do + input = 'Kismet Chameleon; Lazarus het_Draeke' + doc = empty_document + parse_header_metadata input, doc + assert_equal 2, doc.attributes['authorcount'] + assert_equal 
'Kismet Chameleon, Lazarus het Draeke', doc.attributes['authors'] + assert_equal 'Kismet Chameleon', doc.attributes['author_1'] + assert_equal 'Lazarus het Draeke', doc.attributes['author_2'] + assert_equal 'het Draeke', doc.attributes['lastname_2'] + end + + test 'allows authors to be overridden using explicit author attributes' do + input = <<~'EOS' + Kismet Chameleon; Johnny Bravo; Lazarus het_Draeke + :author_2: Danger Mouse + EOS + doc = empty_document + parse_header_metadata input, doc + assert_equal 3, doc.attributes['authorcount'] + assert_equal 'Kismet Chameleon, Danger Mouse, Lazarus het Draeke', doc.attributes['authors'] + assert_equal 'Kismet Chameleon', doc.attributes['author_1'] + assert_equal 'Danger Mouse', doc.attributes['author_2'] + assert_equal 'Lazarus het Draeke', doc.attributes['author_3'] + assert_equal 'het Draeke', doc.attributes['lastname_3'] + end + + test 'removes formatting before partitioning author defined using author attribute' do + input = ':author: pass:n[http://example.org/community/team.html[Ze_**Project** team]]' + + doc = empty_document + parse_header_metadata input, doc + assert_equal 1, doc.attributes['authorcount'] + assert_equal 'Ze Project team', doc.attributes['authors'] + assert_equal 'Ze Project', doc.attributes['firstname'] + assert_equal 'team', doc.attributes['lastname'] + end + test "parse rev number date remark" do - input = <<-EOS -Ryan Waldron -v0.0.7, 2013-12-18: The first release you can stand on + input = <<~'EOS' + Ryan Waldron + v0.0.7, 2013-12-18: The first release you can stand on EOS metadata, _ = parse_header_metadata input assert_equal 9, metadata.size @@ -467,10 +436,38 @@ assert_equal 'The first release you can stand on', metadata['revremark'] end + test 'parse rev number, data, and remark as attribute references' do + input = <<~'EOS' + Author Name + v{project-version}, {release-date}: {release-summary} + EOS + metadata, _ = parse_header_metadata input + assert_equal 9, metadata.size + assert_equal '{project-version}', metadata['revnumber'] + assert_equal '{release-date}', metadata['revdate'] + assert_equal '{release-summary}', metadata['revremark'] + end + + test 'should resolve attribute references in rev number, data, and remark' do + input = <<~'EOS' + = Document Title + Author Name + {project-version}, {release-date}: {release-summary} + EOS + doc = document_from_string input, attributes: { + 'project-version' => '1.0.1', + 'release-date' => '2018-05-15', + 'release-summary' => 'The one you can count on!', + } + assert_equal '1.0.1', (doc.attr 'revnumber') + assert_equal '2018-05-15', (doc.attr 'revdate') + assert_equal 'The one you can count on!', (doc.attr 'revremark') + end + test "parse rev date" do - input = <<-EOS -Ryan Waldron -2013-12-18 + input = <<~'EOS' + Ryan Waldron + 2013-12-18 EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size @@ -478,33 +475,33 @@ end test 'parse rev number with trailing comma' do - input = <<-EOS -Stuart Rackham -v8.6.8, + input = <<~'EOS' + Stuart Rackham + v8.6.8, EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '8.6.8', metadata['revnumber'] - assert !metadata.has_key?('revdate') + refute metadata.has_key?('revdate') end # Asciidoctor recognizes a standalone revision without a trailing comma test 'parse rev number' do - input = <<-EOS -Stuart Rackham -v8.6.8 + input = <<~'EOS' + Stuart Rackham + v8.6.8 EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '8.6.8', 
metadata['revnumber'] - assert !metadata.has_key?('revdate') + refute metadata.has_key?('revdate') end # while compliant w/ AsciiDoc, this is just sloppy parsing test "treats arbitrary text on rev line as revdate" do - input = <<-EOS -Ryan Waldron -foobar + input = <<~'EOS' + Ryan Waldron + foobar EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size @@ -512,9 +509,9 @@ end test "parse rev date remark" do - input = <<-EOS -Ryan Waldron -2013-12-18: The first release you can stand on + input = <<~'EOS' + Ryan Waldron + 2013-12-18: The first release you can stand on EOS metadata, _ = parse_header_metadata input assert_equal 8, metadata.size @@ -523,30 +520,31 @@ end test "should not mistake attribute entry as rev remark" do - input = <<-EOS -Joe Cool -:page-layout: post + input = <<~'EOS' + Joe Cool + :page-layout: post EOS metadata, _ = parse_header_metadata input refute_equal 'page-layout: post', metadata['revremark'] - assert !metadata.has_key?('revdate') + refute metadata.has_key?('revdate') end test "parse rev remark only" do - input = <<-EOS -Joe Cool - :Must start revremark-only line with space + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + Joe Cool + :Must start revremark-only line with space EOS metadata, _ = parse_header_metadata input assert_equal 'Must start revremark-only line with space', metadata['revremark'] - assert !metadata.has_key?('revdate') + refute metadata.has_key?('revdate') end test "skip line comments before author" do - input = <<-EOS -// Asciidoctor -// release artist -Ryan Waldron + input = <<~'EOS' + // Asciidoctor + // release artist + Ryan Waldron EOS metadata, _ = parse_header_metadata input assert_equal 6, metadata.size @@ -558,12 +556,12 @@ end test "skip block comment before author" do - input = <<-EOS -//// -Asciidoctor -release artist -//// -Ryan Waldron + input = <<~'EOS' + //// + Asciidoctor + release artist + //// + Ryan Waldron EOS metadata, _ = parse_header_metadata input assert_equal 6, metadata.size @@ -575,13 +573,13 @@ end test "skip block comment before rev" do - input = <<-EOS -Ryan Waldron -//// -Asciidoctor -release info -//// -v0.0.7, 2013-12-18 + input = <<~'EOS' + Ryan Waldron + //// + Asciidoctor + release info + //// + v0.0.7, 2013-12-18 EOS metadata, _ = parse_header_metadata input assert_equal 8, metadata.size @@ -591,112 +589,126 @@ assert_equal '2013-12-18', metadata['revdate'] end - test "attribute entry overrides generated author initials" do - blankdoc = Asciidoctor::Document.new - reader = Asciidoctor::Reader.new "Stuart Rackham \n:Author Initials: SJR".lines.entries - metadata = Asciidoctor::Parser.parse_header_metadata(reader, blankdoc) + test 'break header at line with three forward slashes' do + input = <<~'EOS' + Joe Cool + v1.0 + /// + stuff + EOS + metadata, _ = parse_header_metadata input + assert_equal 7, metadata.size + assert_equal 1, metadata['authorcount'] + assert_equal 'Joe Cool', metadata['author'] + assert_equal '1.0', metadata['revnumber'] + end + + test 'attribute entry overrides generated author initials' do + doc = empty_document + metadata, _ = parse_header_metadata %(Stuart Rackham \n:Author Initials: SJR), doc assert_equal 'SR', metadata['authorinitials'] - assert_equal 'SJR', blankdoc.attributes['authorinitials'] + assert_equal 'SJR', doc.attributes['authorinitials'] end test 'adjust indentation to 0' do - input = <<-EOS.chomp - def names + input = <<~EOS + \x20 def names - @name.split ' ' + \x20 @name.split - end 
+ \x20 end EOS - expected = <<-EOS.chomp -def names + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + def names - @name.split ' ' + @name.split -end + end EOS - lines = input.split("\n") + lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines - assert_equal expected, (lines * "\n") + assert_equal expected, (lines * ?\n) end test 'adjust indentation mixed with tabs and spaces to 0' do - input = <<-EOS.chomp - def names + input = <<~EOS + def names -\t @name.split ' ' + \t @name.split - end + end EOS - expected = <<-EOS.chomp -def names + expected = <<~EOS.chop + def names - @name.split ' ' + @name.split -end + end EOS - lines = input.split("\n") + lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines, 0, 4 - assert_equal expected, (lines * "\n") + assert_equal expected, (lines * ?\n) end test 'expands tabs to spaces' do - input = <<-EOS.chomp -Filesystem Size Used Avail Use% Mounted on -Filesystem Size Used Avail Use% Mounted on -devtmpfs 3.9G 0 3.9G 0% /dev -/dev/mapper/fedora-root 48G 18G 29G 39% / + input = <<~'EOS' + Filesystem Size Used Avail Use% Mounted on + Filesystem Size Used Avail Use% Mounted on + devtmpfs 3.9G 0 3.9G 0% /dev + /dev/mapper/fedora-root 48G 18G 29G 39% / EOS - expected = <<-EOS.chomp -Filesystem Size Used Avail Use% Mounted on -Filesystem Size Used Avail Use% Mounted on -devtmpfs 3.9G 0 3.9G 0% /dev -/dev/mapper/fedora-root 48G 18G 29G 39% / + expected = <<~'EOS'.chop + Filesystem Size Used Avail Use% Mounted on + Filesystem Size Used Avail Use% Mounted on + devtmpfs 3.9G 0 3.9G 0% /dev + /dev/mapper/fedora-root 48G 18G 29G 39% / EOS - lines = input.split("\n") + lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines, 0, 4 - assert_equal expected, (lines * "\n") + assert_equal expected, (lines * ?\n) end test 'adjust indentation to non-zero' do - input = <<-EOS.chomp - def names + input = <<~EOS + \x20 def names - @name.split ' ' + \x20 @name.split - end + \x20 end EOS - expected = <<-EOS.chomp - def names + expected = <<~EOS.chop + \x20 def names - @name.split ' ' + \x20 @name.split - end + \x20 end EOS - lines = input.split("\n") + lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines, 2 - assert_equal expected, (lines * "\n") + assert_equal expected, (lines * ?\n) end test 'preserve block indent if indent is -1' do - input = <<-EOS - def names + input = <<~EOS + \x20 def names - @name.split ' ' + \x20 @name.split - end + \x20 end EOS expected = input - lines = input.lines.entries + lines = input.lines Asciidoctor::Parser.adjust_indentation! lines, -1 assert_equal expected, lines.join end @@ -710,4 +722,19 @@ assert_equal expected, lines end + test 'should warn if inline anchor is already in use' do + input = <<~'EOS' + [#in-use] + A paragraph with an id. + + Another paragraph + [[in-use]]that uses an id + which is already in use. + EOS + + using_memory_logger do |logger| + document_from_string input + assert_message logger, :WARN, ': line 5: id assigned to anchor already in use: in-use', Hash + end + end end diff -Nru asciidoctor-1.5.5/test/paths_test.rb asciidoctor-2.0.10/test/paths_test.rb --- asciidoctor-1.5.5/test/paths_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/paths_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,8 +1,5 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! 
- require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Path Resolver' do context 'Web Paths' do @@ -88,31 +85,68 @@ assert_equal 'assets/images', @resolver.web_path(nil, 'assets/images') end - test 'posixfies windows paths' do + test 'posixifies windows paths' do + @resolver.file_separator = '\\' assert_equal '/images', @resolver.web_path('\\images') assert_equal '../images', @resolver.web_path('..\\images') assert_equal '/images', @resolver.web_path('\\..\\images') assert_equal 'assets/images', @resolver.web_path('assets\\images') assert_equal '../assets/images', @resolver.web_path('assets\\images', '..\\images\\..') end + + test 'URL encode spaces in path' do + assert_equal 'assets%20and%20stuff/lots%20of%20images', @resolver.web_path('lots of images', 'assets and stuff') + end end context 'System Paths' do JAIL = '/home/doctor/docs' + default_logger = Asciidoctor::LoggerManager.logger def setup @resolver = Asciidoctor::PathResolver.new + @logger = (Asciidoctor::LoggerManager.logger = Asciidoctor::MemoryLogger.new) + end + + teardown do + Asciidoctor::LoggerManager.logger = default_logger + end + + test 'raises security error if jail is not an absolute path' do + begin + @resolver.system_path('images/tiger.png', '/etc', 'foo') + flunk 'Expecting SecurityError to be raised' + rescue SecurityError + end end + #test 'raises security error if jail is not a canoncial path' do + # begin + # @resolver.system_path('images/tiger.png', '/etc', %(#{JAIL}/../foo)) + # flunk 'Expecting SecurityError to be raised' + # rescue SecurityError + # end + #end + test 'prevents access to paths outside of jail' do - assert_equal "#{JAIL}/css", @resolver.system_path('../../../../../css', "#{JAIL}/assets/stylesheets", JAIL) - assert_equal "#{JAIL}/css", @resolver.system_path('/../../../../../css', "#{JAIL}/assets/stylesheets", JAIL) - assert_equal "#{JAIL}/css", @resolver.system_path('../../../css', '../../..', JAIL) + result = @resolver.system_path '../../../../../css', %(#{JAIL}/assets/stylesheets), JAIL + assert_equal %(#{JAIL}/css), result + assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' + + @logger.clear + result = @resolver.system_path '/../../../../../css', %(#{JAIL}/assets/stylesheets), JAIL + assert_equal %(#{JAIL}/css), result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + result = @resolver.system_path '../../../css', '../../..', JAIL + assert_equal %(#{JAIL}/css), result + assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' end test 'throws exception for illegal path access if recover is false' do begin - @resolver.system_path('../../../../../css', "#{JAIL}/assets/stylesheets", JAIL, :recover => false) + @resolver.system_path('../../../../../css', "#{JAIL}/assets/stylesheets", JAIL, recover: false) flunk 'Expecting SecurityError to be raised' rescue SecurityError end @@ -123,15 +157,39 @@ assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path(nil, "#{JAIL}/assets/stylesheets", JAIL) end + test 'expands parent references in start path if target is empty' do + assert_equal "#{JAIL}/stylesheets", @resolver.system_path('', "#{JAIL}/assets/../stylesheets", JAIL) + end + + test 'expands parent references in start path if target is not empty' do + assert_equal "#{JAIL}/stylesheets/site.css", @resolver.system_path('site.css', "#{JAIL}/assets/../stylesheets", JAIL) + end + test 
'resolves start path if target is dot' do assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('.', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('./', "#{JAIL}/assets/stylesheets", JAIL) end - test 'treats absolute target as relative when jail is specified' do - assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('/', "#{JAIL}/assets/stylesheets", JAIL) - assert_equal "#{JAIL}/assets/stylesheets/foo", @resolver.system_path('/foo', "#{JAIL}/assets/stylesheets", JAIL) - assert_equal "#{JAIL}/assets/foo", @resolver.system_path('/../foo', "#{JAIL}/assets/stylesheets", JAIL) + test 'treats absolute target outside of jail as relative when jail is specified' do + result = @resolver.system_path '/', "#{JAIL}/assets/stylesheets", JAIL + assert_equal JAIL, result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + result = @resolver.system_path '/foo', "#{JAIL}/assets/stylesheets", JAIL + assert_equal "#{JAIL}/foo", result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + result = @resolver.system_path '/../foo', "#{JAIL}/assets/stylesheets", JAIL + assert_equal "#{JAIL}/foo", result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + @resolver.file_separator = '\\' + result = @resolver.system_path 'baz.adoc', 'C:/foo', 'C:/bar' + assert_equal 'C:/bar/baz.adoc', result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' end test 'allows use of absolute target or start if resolved path is sub-path of jail' do @@ -140,6 +198,9 @@ assert_equal "#{JAIL}/my/path", @resolver.system_path('', "#{JAIL}/my/path", JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path(nil, "#{JAIL}/my/path", JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path('path', "#{JAIL}/my", JAIL) + assert_equal '/foo/bar/baz.adoc', @resolver.system_path('/foo/bar/baz.adoc', nil, '/') + assert_equal '/foo/bar/baz.adoc', @resolver.system_path('baz.adoc', '/foo/bar', '/') + assert_equal '/foo/bar/baz.adoc', @resolver.system_path('baz.adoc', 'foo/bar', '/') end test 'uses jail path if start path is empty' do @@ -147,20 +208,59 @@ assert_equal "#{JAIL}/images/tiger.png", @resolver.system_path('images/tiger.png', nil, JAIL) end - test 'raises security error if start is not contained within jail' do + test 'warns if start is not contained within jail' do + result = @resolver.system_path 'images/tiger.png', '/etc', JAIL + assert_equal %(#{JAIL}/images/tiger.png), result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + result = @resolver.system_path '.', '/etc', JAIL + assert_equal JAIL, result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + @resolver.file_separator = '\\' + result = @resolver.system_path '.', 'C:/foo', 'C:/bar' + assert_equal 'C:/bar', result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + end + + test 'allows start path to be parent of jail if resolved target is inside jail' do + assert_equal "#{JAIL}/foo/path", @resolver.system_path('foo/path', JAIL, "#{JAIL}/foo") + @resolver.file_separator = '\\' + assert_equal "C:/dev/project/README.adoc", @resolver.system_path('project/README.adoc', 'C:/dev', 'C:/dev/project') + end + + test 'relocates target to jail if resolved value fails outside of jail' 
do + result = @resolver.system_path 'bar/baz.adoc', JAIL, "#{JAIL}/foo" + assert_equal %(#{JAIL}/foo/bar/baz.adoc), result + assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' + + @logger.clear + @resolver.file_separator = '\\' + result = @resolver.system_path 'bar/baz.adoc', 'D:/', 'C:/foo' + assert_equal 'C:/foo/bar/baz.adoc', result + assert_message @logger, :WARN, '~outside of jail root' + end + + test 'raises security error if start is not contained within jail and recover is disabled' do begin - @resolver.system_path('images/tiger.png', '/etc', JAIL) + @resolver.system_path('images/tiger.png', '/etc', JAIL, recover: false) flunk 'Expecting SecurityError to be raised' rescue SecurityError end begin - @resolver.system_path('.', '/etc', JAIL) + @resolver.system_path('.', '/etc', JAIL, recover: false) flunk 'Expecting SecurityError to be raised' rescue SecurityError end end + test 'expands parent references in absolute path if jail is not specified' do + assert_equal '/etc/stylesheet.css', @resolver.system_path('/usr/share/../../etc/stylesheet.css') + end + test 'resolves absolute directory if jail is not specified' do assert_equal '/usr/share/stylesheet.css', @resolver.system_path('/usr/share/stylesheet.css', '/home/dallen/docs/assets/stylesheets') end @@ -173,14 +273,32 @@ assert_equal '/usr/share/assets/stylesheet.css', @resolver.system_path('assets/stylesheet.css', '/usr/share') end - test 'resolves absolute UNC path if start is absolute and target is relative' do + test 'File.dirname preserves UNC path root on Windows' do + assert_equal File.dirname('\\\\server\\docs\\file.html'), '\\\\server\\docs' + end if windows? + + test 'File.dirname preserves posix-style UNC path root on Windows' do + assert_equal File.dirname('//server/docs/file.html'), '//server/docs' + end if windows? 
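# A minimal usage sketch (not taken from the patch) of the jail-confinement
# behavior the surrounding PathResolver tests exercise. It assumes only the
# public Asciidoctor 2.x API already used above (PathResolver.new and
# #system_path); the literal paths are illustrative, not test fixtures.
require 'asciidoctor'

resolver = Asciidoctor::PathResolver.new
jail = '/home/doctor/docs'

# A relative target under the jail resolves to an absolute path inside it.
resolver.system_path 'images/tiger.png', jail, jail
#=> "/home/doctor/docs/images/tiger.png"

# Parent references that would escape the jail are discarded; the resolver
# recovers automatically and logs a warning.
resolver.system_path '../../../../../css', "#{jail}/assets/stylesheets", jail
#=> "/home/doctor/docs/css"

# With recover: false the same escape attempt raises SecurityError instead.
begin
  resolver.system_path '../../../../../css', "#{jail}/assets/stylesheets", jail, recover: false
rescue SecurityError => e
  warn e.message
end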
+ + test 'resolves UNC path if start is absolute and target is relative' do assert_equal '//QA/c$/users/asciidoctor/assets/stylesheet.css', @resolver.system_path('assets/stylesheet.css', '//QA/c$/users/asciidoctor') end + test 'resolves UNC path if target is UNC path' do + @resolver.file_separator = '\\' + assert_equal '//server/docs/output.html', @resolver.system_path('\\\\server\\docs\\output.html') + end + + test 'resolves UNC path if target is posix-style UNC path' do + assert_equal '//server/docs/output.html', @resolver.system_path('//server/docs/output.html') + end + test 'resolves relative target relative to current directory if start is empty' do pwd = File.expand_path(Dir.pwd) assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png', '') assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png', nil) + assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png') end test 'resolves relative hidden target relative to current directory if start is empty' do @@ -189,23 +307,35 @@ assert_equal "#{pwd}/.images/tiger.png", @resolver.system_path('.images/tiger.png', nil) end - test 'resolves and normalizes start with target is empty' do - pwd = File.expand_path(Dir.pwd) - assert_equal '/home/doctor/docs', @resolver.system_path('', '/home/doctor/docs') - assert_equal '/home/doctor/docs', @resolver.system_path(nil, '/home/doctor/docs') - assert_equal "#{pwd}/assets/images", @resolver.system_path(nil, 'assets/images') - assert_equal "#{JAIL}/assets/images", @resolver.system_path('', '../assets/images', JAIL) + test 'resolves and normalizes start when target is empty' do + pwd = File.expand_path Dir.pwd + assert_equal '/home/doctor/docs', (@resolver.system_path '', '/home/doctor/docs') + assert_equal '/home/doctor/docs', (@resolver.system_path '', '/home/doctor/./docs') + assert_equal '/home/doctor/docs', (@resolver.system_path nil, '/home/doctor/docs') + assert_equal '/home/doctor/docs', (@resolver.system_path nil, '/home/doctor/./docs') + assert_equal %(#{pwd}/assets/images), (@resolver.system_path nil, 'assets/images') + @resolver.system_path '', '../assets/images', JAIL + assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' end - test 'posixfies windows paths' do + test 'posixifies windows paths' do + @resolver.file_separator = '\\' assert_equal "#{JAIL}/assets/css", @resolver.system_path('..\\css', 'assets\\stylesheets', JAIL) end test 'resolves windows paths when file separator is backlash' do @resolver.file_separator = '\\' - assert_equal 'C:/data/docs', @resolver.system_path('..', "C:\\data\\docs\\assets", 'C:\\data\\docs') - assert_equal 'C:/data/docs', @resolver.system_path('..\\..', "C:\\data\\docs\\assets", 'C:\\data\\docs') - assert_equal 'C:/data/docs/css', @resolver.system_path('..\\..\\css', "C:\\data\\docs\\assets", 'C:\\data\\docs') + + assert_equal 'C:/data/docs', (@resolver.system_path '..', 'C:\\data\\docs\\assets', 'C:\\data\\docs') + + result = @resolver.system_path '..\\..', 'C:\\data\\docs\\assets', 'C:\\data\\docs' + assert_equal 'C:/data/docs', result + assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' + + @logger.clear + result = @resolver.system_path '..\\..\\css', 'C:\\data\\docs\\assets', 'C:\\data\\docs' + assert_equal 'C:/data/docs/css', result + assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' end test 'should calculate relative 
path' do @@ -214,9 +344,23 @@ assert_equal 'part1/chapter1/section1.adoc', @resolver.relative_path(filename, JAIL) end + test 'should resolve relative path to filename outside of base directory' do + filename = '/home/shared/partials' + base_dir = '/home/user/docs' + result = @resolver.relative_path filename, base_dir + assert_equal '../../shared/partials', result + end + + test 'should return original path if relative path cannot be computed' do + filename = 'D:/path/to/include/file.txt' + base_dir = 'C:/docs' + result = @resolver.relative_path filename, base_dir + assert_equal 'D:/path/to/include/file.txt', result + end if windows? + test 'should resolve relative path relative to base dir in unsafe mode' do base_dir = fixture_path 'base' - doc = empty_document :base_dir => base_dir, :safe => Asciidoctor::SafeMode::UNSAFE + doc = empty_document base_dir: base_dir, safe: Asciidoctor::SafeMode::UNSAFE expected = ::File.join base_dir, 'images', 'tiger.png' actual = doc.normalize_system_path 'tiger.png', 'images' assert_equal expected, actual @@ -224,32 +368,9 @@ test 'should resolve absolute path as absolute in unsafe mode' do base_dir = fixture_path 'base' - doc = empty_document :base_dir => base_dir, :safe => Asciidoctor::SafeMode::UNSAFE + doc = empty_document base_dir: base_dir, safe: Asciidoctor::SafeMode::UNSAFE actual = doc.normalize_system_path 'tiger.png', '/etc/images' assert_equal '/etc/images/tiger.png', actual end end - - context 'Helpers' do - test 'rootname should return file name without extension' do - assert_equal 'master', Asciidoctor::Helpers.rootname('master.adoc') - assert_equal 'docs/master', Asciidoctor::Helpers.rootname('docs/master.adoc') - end - - test 'rootname should file name if it has no extension' do - assert_equal 'master', Asciidoctor::Helpers.rootname('master') - assert_equal 'docs/master', Asciidoctor::Helpers.rootname('docs/master') - end - - test 'UriSniffRx should detect URIs' do - assert Asciidoctor::UriSniffRx =~ 'http://example.com' - assert Asciidoctor::UriSniffRx =~ 'https://example.com' - assert Asciidoctor::UriSniffRx =~ 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=' - end - - test 'UriSniffRx should not detect an absolute Windows path as a URI' do - assert Asciidoctor::UriSniffRx !~ 'c:/sample.adoc' - assert Asciidoctor::UriSniffRx !~ 'c:\\sample.adoc' - end - end end diff -Nru asciidoctor-1.5.5/test/preamble_test.rb asciidoctor-2.0.10/test/preamble_test.rb --- asciidoctor-1.5.5/test/preamble_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/preamble_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,22 +1,18 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Preamble' do - test 'title and single paragraph preamble before section' do - input = <<-EOS -= Title + input = <<~'EOS' + = Title -Preamble paragraph 1. + Preamble paragraph 1. -== First Section + == First Section -Section paragraph 1. + Section paragraph 1. EOS - result = render_string(input) + result = convert_string(input) assert_xpath '//p', result, 2 assert_xpath '//*[@id="preamble"]', result, 1 assert_xpath '//*[@id="preamble"]//p', result, 1 @@ -25,51 +21,51 @@ end test 'title of preface is blank by default in DocBook output' do - input = <<-EOS -= Document Title -:doctype: book + input = <<~'EOS' + = Document Title + :doctype: book -Preface content. + Preface content. 
-== First Section + == First Section -Section content. + Section content. EOS - result = render_string input, :backend => :docbook + result = convert_string input, backend: :docbook assert_xpath '//preface/title', result, 1 title_node = xmlnodes_at_xpath '//preface/title', result, 1 assert_equal '', title_node.text end test 'preface-title attribute is assigned as title of preface in DocBook output' do - input = <<-EOS -= Document Title -:doctype: book -:preface-title: Preface + input = <<~'EOS' + = Document Title + :doctype: book + :preface-title: Preface -Preface content. + Preface content. -== First Section + == First Section -Section content. + Section content. EOS - result = render_string input, :backend => :docbook + result = convert_string input, backend: :docbook assert_xpath '//preface/title[text()="Preface"]', result, 1 end test 'title and multi-paragraph preamble before section' do - input = <<-EOS -= Title + input = <<~'EOS' + = Title -Preamble paragraph 1. + Preamble paragraph 1. -Preamble paragraph 2. + Preamble paragraph 2. -== First Section + == First Section -Section paragraph 1. + Section paragraph 1. EOS - result = render_string(input) + result = convert_string(input) assert_xpath '//p', result, 3 assert_xpath '//*[@id="preamble"]', result, 1 assert_xpath '//*[@id="preamble"]//p', result, 2 @@ -78,96 +74,96 @@ end test 'should not wrap content in preamble if document has title but no sections' do - input = <<-EOS -= Title + input = <<~'EOS' + = Title -paragraph + paragraph EOS - result = render_string(input) + result = convert_string(input) assert_xpath '//p', result, 1 assert_xpath '//*[@id="content"]/*[@class="paragraph"]/p', result, 1 assert_xpath '//*[@id="content"]/*[@class="paragraph"]/following-sibling::*', result, 0 end test 'title and section without preamble' do - input = <<-EOS -= Title + input = <<~'EOS' + = Title -== First Section + == First Section -Section paragraph 1. + Section paragraph 1. EOS - result = render_string(input) + result = convert_string(input) assert_xpath '//p', result, 1 assert_xpath '//*[@id="preamble"]', result, 0 assert_xpath '//h2[@id="_first_section"]', result, 1 end test 'no title with preamble and section' do - input = <<-EOS -Preamble paragraph 1. + input = <<~'EOS' + Preamble paragraph 1. -== First Section + == First Section -Section paragraph 1. + Section paragraph 1. EOS - result = render_string(input) + result = convert_string(input) assert_xpath '//p', result, 2 assert_xpath '//*[@id="preamble"]', result, 0 assert_xpath '//h2[@id="_first_section"]/preceding::p', result, 1 end test 'preamble in book doctype' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -Back then... + Back then... -= Chapter One + = Chapter One -[partintro] -It was a dark and stormy night... + [partintro] + It was a dark and stormy night... -== Scene One + == Scene One -Someone's gonna get axed. + Someone's gonna get axed. -= Chapter Two + = Chapter Two -[partintro] -They couldn't believe their eyes when... + [partintro] + They couldn't believe their eyes when... -== Scene One + == Scene One -The axe came swinging. + The axe came swinging. 
EOS d = document_from_string(input) assert_equal 'book', d.doctype - output = d.render + output = d.convert assert_xpath '//h1', output, 3 - assert_xpath %{//*[@id="preamble"]//p[text() = "Back then#{expand_entity 8230}#{expand_entity 8203}"]}, output, 1 + assert_xpath %{//*[@id="preamble"]//p[text() = "Back then#{decode_char 8230}#{decode_char 8203}"]}, output, 1 end - test 'should render table of contents in preamble if toc-placement attribute value is preamble' do - input = <<-EOS -= Article -:toc: -:toc-placement: preamble + test 'should output table of contents in preamble if toc-placement attribute value is preamble' do + input = <<~'EOS' + = Article + :toc: + :toc-placement: preamble -Once upon a time... + Once upon a time... -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... - EOS + They couldn't believe their eyes when... + EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="preamble"]/*[@id="toc"]', output, 1 end end diff -Nru asciidoctor-1.5.5/test/reader_test.rb asciidoctor-2.0.10/test/reader_test.rb --- asciidoctor-1.5.5/test/reader_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/reader_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,17 +1,10 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' class ReaderTest < Minitest::Test - DIRNAME = File.expand_path(File.dirname(__FILE__)) + DIRNAME = ASCIIDOCTOR_TEST_DIR - SAMPLE_DATA = <<-EOS.chomp.split(::Asciidoctor::EOL) -first line -second line -third line - EOS + SAMPLE_DATA = ['first line', 'second line', 'third line'] context 'Reader' do context 'Prepare lines' do @@ -21,51 +14,72 @@ end test 'should prepare lines from String data' do - reader = Asciidoctor::Reader.new SAMPLE_DATA + reader = Asciidoctor::Reader.new SAMPLE_DATA.join(Asciidoctor::LF) assert_equal SAMPLE_DATA, reader.lines end test 'should remove UTF-8 BOM from first line of String data' do - data = "\xef\xbb\xbf#{SAMPLE_DATA.join ::Asciidoctor::EOL}" - reader = Asciidoctor::Reader.new data, nil, :normalize => true - assert_equal 'f', reader.lines.first[0..0] - assert_equal SAMPLE_DATA, reader.lines + ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| + data = String.new %(\xef\xbb\xbf#{SAMPLE_DATA.join ::Asciidoctor::LF}), encoding: start_encoding + reader = Asciidoctor::Reader.new data, nil, normalize: true + assert_equal Encoding::UTF_8, reader.lines[0].encoding + assert_equal 'f', reader.lines[0].chr + assert_equal SAMPLE_DATA, reader.lines + end end test 'should remove UTF-8 BOM from first line of Array data' do - data = SAMPLE_DATA.dup - data[0] = "\xef\xbb\xbf#{data.first}" - reader = Asciidoctor::Reader.new data, nil, :normalize => true - assert_equal 'f', reader.lines.first[0..0] - assert_equal SAMPLE_DATA, reader.lines + ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| + data = SAMPLE_DATA.drop 0 + data[0] = String.new %(\xef\xbb\xbf#{data.first}), encoding: start_encoding + reader = Asciidoctor::Reader.new data, nil, normalize: true + assert_equal Encoding::UTF_8, reader.lines[0].encoding + assert_equal 'f', reader.lines[0].chr + assert_equal SAMPLE_DATA, reader.lines + end end - if Asciidoctor::COERCE_ENCODING - test 'should encode UTF-16LE string to UTF-8 when BOM is found' do - data = "\uFEFF#{SAMPLE_DATA.join 
::Asciidoctor::EOL}".encode('UTF-16LE').force_encoding('UTF-8') - reader = Asciidoctor::Reader.new data, nil, :normalize => true - assert_equal 'f', reader.lines.first[0..0] + test 'should encode UTF-16LE string to UTF-8 when BOM is found' do + ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| + data = "\ufeff#{SAMPLE_DATA.join ::Asciidoctor::LF}".encode('UTF-16LE').force_encoding(start_encoding) + reader = Asciidoctor::Reader.new data, nil, normalize: true + assert_equal Encoding::UTF_8, reader.lines[0].encoding + assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end + end - test 'should encode UTF-16LE string array to UTF-8 when BOM is found' do - data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16LE').force_encoding('UTF-8').lines.to_a - reader = Asciidoctor::Reader.new data, nil, :normalize => true - assert_equal 'f', reader.lines.first[0..0] + test 'should encode UTF-16LE string array to UTF-8 when BOM is found' do + ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| + # NOTE can't split a UTF-16LE string using .lines when encoding is set to UTF-8 + data = SAMPLE_DATA.drop 0 + data.unshift %(\ufeff#{data.shift}) + data.each {|line| (line.encode 'UTF-16LE').force_encoding start_encoding } + reader = Asciidoctor::Reader.new data, nil, normalize: true + assert_equal Encoding::UTF_8, reader.lines[0].encoding + assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end + end - test 'should encode UTF-16BE string to UTF-8 when BOM is found' do - data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16BE').force_encoding('UTF-8') - reader = Asciidoctor::Reader.new data, nil, :normalize => true - assert_equal 'f', reader.lines.first[0..0] + test 'should encode UTF-16BE string to UTF-8 when BOM is found' do + ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| + data = "\ufeff#{SAMPLE_DATA.join ::Asciidoctor::LF}".encode('UTF-16BE').force_encoding(start_encoding) + reader = Asciidoctor::Reader.new data, nil, normalize: true + assert_equal Encoding::UTF_8, reader.lines[0].encoding + assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end + end - test 'should encode UTF-16BE string array to UTF-8 when BOM is found' do - data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16BE').force_encoding('UTF-8').lines.to_a - reader = Asciidoctor::Reader.new data, nil, :normalize => true - assert_equal 'f', reader.lines.first[0..0] + test 'should encode UTF-16BE string array to UTF-8 when BOM is found' do + ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| + data = SAMPLE_DATA.drop 0 + data.unshift %(\ufeff#{data.shift}) + data = data.map {|line| (line.encode 'UTF-16BE').force_encoding start_encoding } + reader = Asciidoctor::Reader.new data, nil, normalize: true + assert_equal Encoding::UTF_8, reader.lines[0].encoding + assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end @@ -73,7 +87,7 @@ context 'With empty data' do test 'has_more_lines? should return false with empty data' do - assert !Asciidoctor::Reader.new.has_more_lines? + refute Asciidoctor::Reader.new.has_more_lines? end test 'empty? should return true with empty data' do @@ -90,7 +104,7 @@ end test 'peek_lines should return empty Array with empty data' do - assert_equal [], Asciidoctor::Reader.new.peek_lines + assert_equal [], Asciidoctor::Reader.new.peek_lines(1) end test 'read_line should return nil with empty data' do @@ -112,13 +126,13 @@ test 'empty? 
should return false if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA - assert !reader.empty? - assert !reader.eof? + refute reader.empty? + refute reader.eof? end test 'next_line_empty? should return false if next line is not blank' do reader = Asciidoctor::Reader.new SAMPLE_DATA - assert !reader.next_line_empty? + refute reader.next_line_empty? end test 'next_line_empty? should return true if next line is blank' do @@ -150,6 +164,18 @@ assert_equal 1, reader.lineno end + test 'peek_lines should not increment line number if reader overruns buffer' do + reader = Asciidoctor::Reader.new SAMPLE_DATA + assert_equal SAMPLE_DATA, (reader.peek_lines SAMPLE_DATA.size * 2) + assert_equal 1, reader.lineno + end + + test 'peek_lines should peek all lines if no arguments are given' do + reader = Asciidoctor::Reader.new SAMPLE_DATA + assert_equal SAMPLE_DATA, reader.peek_lines + assert_equal 1, reader.lineno + end + test 'peek_lines should not invert order of lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.lines @@ -174,7 +200,7 @@ assert reader.advance assert reader.advance assert reader.advance - assert !reader.advance + refute reader.advance end test 'read_lines should return all lines' do @@ -184,17 +210,17 @@ test 'read should return all lines joined as String' do reader = Asciidoctor::Reader.new SAMPLE_DATA - assert_equal SAMPLE_DATA.join(::Asciidoctor::EOL), reader.read + assert_equal SAMPLE_DATA.join(::Asciidoctor::LF), reader.read end test 'has_more_lines? should return false after read_lines is invoked' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines - assert !reader.has_more_lines? + refute reader.has_more_lines? end test 'unshift puts line onto Reader as next line to read' do - reader = Asciidoctor::Reader.new SAMPLE_DATA, nil, :normalize => true + reader = Asciidoctor::Reader.new SAMPLE_DATA, nil, normalize: true reader.unshift 'line zero' assert_equal 'line zero', reader.peek_line assert_equal 'line zero', reader.read_line @@ -229,85 +255,84 @@ test 'source should return original data Array joined as String' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines - assert_equal SAMPLE_DATA.join(::Asciidoctor::EOL), reader.source + assert_equal SAMPLE_DATA.join(::Asciidoctor::LF), reader.source end end context 'Line context' do - test 'to_s should return file name and line number of current line' do + test 'cursor.to_s should return file name and line number of current line' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line - assert_equal 'sample.adoc: line 2', reader.to_s + assert_equal 'sample.adoc: line 2', reader.cursor.to_s end test 'line_info should return file name and line number of current line' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 2', reader.line_info - assert_equal 'sample.adoc: line 2', reader.next_line_info end - test 'prev_line_info should return file name and line number of previous line read' do + test 'cursor_at_prev_line should return file name and line number of previous line read' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line - assert_equal 'sample.adoc: line 1', reader.prev_line_info + assert_equal 'sample.adoc: line 1', reader.cursor_at_prev_line.to_s end end context 'Read lines until' do test 'Read lines until until end' do - lines = <<-EOS.lines.entries -This is one paragraph. + lines = <<~'EOS'.lines + This is one paragraph. 
-This is another paragraph. + This is another paragraph. EOS - reader = Asciidoctor::Reader.new lines, nil, :normalize => true + reader = Asciidoctor::Reader.new lines, nil, normalize: true result = reader.read_lines_until assert_equal 3, result.size - assert_equal lines.map {|l| l.chomp }, result - assert !reader.has_more_lines? + assert_equal lines.map(&:chomp), result + refute reader.has_more_lines? assert reader.eof? end test 'Read lines until until blank line' do - lines = <<-EOS.lines.entries -This is one paragraph. + lines = <<~'EOS'.lines + This is one paragraph. -This is another paragraph. + This is another paragraph. EOS - reader = Asciidoctor::Reader.new lines, nil, :normalize => true - result = reader.read_lines_until :break_on_blank_lines => true + reader = Asciidoctor::Reader.new lines, nil, normalize: true + result = reader.read_lines_until break_on_blank_lines: true assert_equal 1, result.size assert_equal lines.first.chomp, result.first assert_equal lines.last.chomp, reader.peek_line end test 'Read lines until until blank line preserving last line' do - lines = <<-EOS.chomp.split(::Asciidoctor::EOL) -This is one paragraph. + lines = <<~'EOS'.split ::Asciidoctor::LF + This is one paragraph. -This is another paragraph. + This is another paragraph. EOS reader = Asciidoctor::Reader.new lines - result = reader.read_lines_until :break_on_blank_lines => true, :preserve_last_line => true + result = reader.read_lines_until break_on_blank_lines: true, preserve_last_line: true assert_equal 1, result.size assert_equal lines.first.chomp, result.first assert reader.next_line_empty? end test 'Read lines until until condition is true' do - lines = <<-EOS.chomp.split(::Asciidoctor::EOL) --- -This is one paragraph inside the block. + lines = <<~'EOS'.split ::Asciidoctor::LF + -- + This is one paragraph inside the block. -This is another paragraph inside the block. --- + This is another paragraph inside the block. + -- -This is a paragraph outside the block. + This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines @@ -319,42 +344,86 @@ end test 'Read lines until until condition is true, taking last line' do - lines = <<-EOS.chomp.split(::Asciidoctor::EOL) --- -This is one paragraph inside the block. + lines = <<~'EOS'.split ::Asciidoctor::LF + -- + This is one paragraph inside the block. -This is another paragraph inside the block. --- + This is another paragraph inside the block. + -- -This is a paragraph outside the block. + This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines reader.read_line - result = reader.read_lines_until(:read_last_line => true) {|line| line == '--' } + result = reader.read_lines_until(read_last_line: true) {|line| line == '--' } assert_equal 4, result.size assert_equal lines[1, 4], result assert reader.next_line_empty? end test 'Read lines until until condition is true, taking and preserving last line' do - lines = <<-EOS.chomp.split(::Asciidoctor::EOL) --- -This is one paragraph inside the block. + lines = <<~'EOS'.split ::Asciidoctor::LF + -- + This is one paragraph inside the block. -This is another paragraph inside the block. --- + This is another paragraph inside the block. + -- -This is a paragraph outside the block. + This is a paragraph outside the block. 
EOS reader = Asciidoctor::Reader.new lines reader.read_line - result = reader.read_lines_until(:read_last_line => true, :preserve_last_line => true) {|line| line == '--' } + result = reader.read_lines_until(read_last_line: true, preserve_last_line: true) {|line| line == '--' } assert_equal 4, result.size assert_equal lines[1, 4], result assert_equal '--', reader.peek_line end + + test 'read lines until terminator' do + lines = <<~'EOS'.lines + **** + captured + + also captured + **** + + not captured + EOS + + expected = ['captured', '', 'also captured'] + + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true + terminator = reader.read_line + result = reader.read_lines_until terminator: terminator, skip_processing: true + assert_equal expected, result + refute reader.unterminated + end + + test 'should flag reader as unterminated if reader reaches end of source without finding terminator' do + lines = <<~'EOS'.lines + **** + captured + + also captured + + captured yet again + EOS + + expected = lines[1..-1].map(&:chomp) + + using_memory_logger do |logger| + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true + terminator = reader.peek_line + result = reader.read_lines_until terminator: terminator, skip_first_line: true, skip_processing: true + assert_equal expected, result + assert reader.unterminated + assert_message logger, :WARN, ': line 1: unterminated **** block', Hash + end + end end end @@ -362,7 +431,7 @@ context 'Type hierarchy' do test 'PreprocessorReader should extend from Reader' do reader = empty_document.reader - assert reader.is_a?(Asciidoctor::Reader) + assert_kind_of Asciidoctor::PreprocessorReader, reader end test 'PreprocessorReader should invoke or emulate Reader initializer' do @@ -375,7 +444,7 @@ context 'Prepare lines' do test 'should prepare and normalize lines from Array data' do - data = SAMPLE_DATA.map {|line| line.chomp} + data = SAMPLE_DATA.drop 0 data.unshift '' data.push '' doc = Asciidoctor::Document.new data @@ -384,70 +453,73 @@ end test 'should prepare and normalize lines from String data' do - data = SAMPLE_DATA.map {|line| line.chomp} + data = SAMPLE_DATA.drop 0 data.unshift ' ' data.push ' ' - data_as_string = data * ::Asciidoctor::EOL + data_as_string = data * ::Asciidoctor::LF doc = Asciidoctor::Document.new data_as_string reader = doc.reader assert_equal SAMPLE_DATA, reader.lines end test 'should clean CRLF from end of lines' do - input = <<-EOS -source\r -with\r -CRLF\r -endlines\r - EOS + input = <<~EOS + source\r + with\r + CRLF\r + line endings\r + EOS - [input, input.lines.to_a, input.split(::Asciidoctor::EOL), input.split(::Asciidoctor::EOL).join(::Asciidoctor::EOL)].each do |lines| + [input, input.lines, input.split(::Asciidoctor::LF), input.split(::Asciidoctor::LF).join(::Asciidoctor::LF)].each do |lines| doc = Asciidoctor::Document.new lines reader = doc.reader reader.lines.each do |line| - assert !line.end_with?("\r"), "CRLF not properly cleaned for source lines: #{lines.inspect}" - assert !line.end_with?("\r\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" - assert !line.end_with?("\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" + refute line.end_with?("\r"), "CRLF not properly cleaned for source lines: #{lines.inspect}" + refute line.end_with?("\r\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" + refute line.end_with?("\n"), "CRLF not 
properly cleaned for source lines: #{lines.inspect}" end end end test 'should not skip front matter by default' do - input = <<-EOS ---- -layout: post -title: Document Title -author: username -tags: [ first, second ] ---- -= Document Title -Author Name + input = <<~'EOS' + --- + layout: post + title: Document Title + author: username + tags: [ first, second ] + --- + = Document Title + Author Name -preamble + preamble EOS doc = Asciidoctor::Document.new input reader = doc.reader - assert !doc.attributes.key?('front-matter') + refute doc.attributes.key?('front-matter') assert_equal '---', reader.peek_line - end + end + + test 'should skip front matter if specified by skip-front-matter attribute' do + front_matter = <<~'EOS'.chop + layout: post + title: Document Title + author: username + tags: [ first, second ] + EOS - test 'should skip front matter if specified by skip-front-matter attribute' do - front_matter = %(layout: post -title: Document Title -author: username -tags: [ first, second ]) - input = <<-EOS ---- -#{front_matter} ---- -= Document Title -Author Name + input = <<~EOS + --- + #{front_matter} + --- + = Document Title + Author Name -preamble + preamble EOS - doc = Asciidoctor::Document.new input, :attributes => {'skip-front-matter' => ''} + doc = Asciidoctor::Document.new input, attributes: { 'skip-front-matter' => '' } reader = doc.reader assert_equal '= Document Title', reader.peek_line assert_equal front_matter, doc.attributes['front-matter'] @@ -483,39 +555,94 @@ assert_nil reader.file assert_equal '', reader.path end + + test 'PreprocessorReader#push_include method should set path from file automatically if not specified' do + lines = %w(a b c) + doc = Asciidoctor::Document.new lines + reader = doc.reader + append_lines = %w(one two three) + reader.push_include append_lines, '/tmp/lines.adoc' + assert_equal '/tmp/lines.adoc', reader.file + assert_equal 'lines.adoc', reader.path + assert doc.catalog[:includes]['lines'] + end + + test 'PreprocessorReader#push_include method should accept file as a URI and compute dir and path' do + file_uri = ::URI.parse 'http://example.com/docs/file.adoc' + dir_uri = ::URI.parse 'http://example.com/docs' + reader = empty_document.reader + reader.push_include %w(one two three), file_uri + assert_same file_uri, reader.file + assert_equal dir_uri, reader.dir + assert_equal 'file.adoc', reader.path + end + + test 'PreprocessorReader#push_include method should accept file as a top-level URI and compute dir and path' do + file_uri = ::URI.parse 'http://example.com/index.adoc' + dir_uri = ::URI.parse 'http://example.com' + reader = empty_document.reader + reader.push_include %w(one two three), file_uri + assert_same file_uri, reader.file + assert_equal dir_uri, reader.dir + assert_equal 'index.adoc', reader.path + end + + test 'PreprocessorReader#push_include method should not fail if data is nil' do + lines = %w(a b c) + doc = Asciidoctor::Document.new lines + reader = doc.reader + reader.push_include nil, '', '' + assert_equal 0, reader.include_stack.size + assert_equal 'a', reader.read_line.rstrip + end + + test 'PreprocessorReader#push_include method should ignore dot in directory name when computing include path' do + lines = %w(a b c) + doc = Asciidoctor::Document.new lines + reader = doc.reader + append_lines = %w(one two three) + reader.push_include append_lines, nil, 'include.d/data' + assert_nil reader.file + assert_equal 'include.d/data', reader.path + assert doc.catalog[:includes]['include.d/data'] + end end context 'Include 
Directive' do test 'include directive is disabled by default and becomes a link' do - input = <<-EOS -include::include-file.asciidoc[] - EOS + input = 'include::include-file.adoc[]' doc = Asciidoctor::Document.new input reader = doc.reader - assert_equal 'link:include-file.asciidoc[]', reader.read_line + assert_equal 'link:include-file.adoc[]', reader.read_line end test 'include directive is enabled when safe mode is less than SECURE' do - input = <<-EOS -include::fixtures/include-file.asciidoc[] - EOS - - doc = document_from_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME - output = doc.render + input = 'include::fixtures/include-file.adoc[]' + doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME + output = doc.convert assert_match(/included content/, output) + assert doc.catalog[:includes]['fixtures/include-file'] end - test 'include directive should resolve file with spaces in name' do - input = <<-EOS -include::fixtures/include file.asciidoc[] + test 'should not track include in catalog for non-AsciiDoc include files' do + input = <<~'EOS' + ---- + include::fixtures/circle.svg[] + ---- EOS - include_file = File.join DIRNAME, 'fixtures', 'include-file.asciidoc' - include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.asciidoc' + doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME + assert doc.catalog[:includes].empty? + end + + test 'include directive should resolve file with spaces in name' do + input = 'include::fixtures/include file.adoc[]' + include_file = File.join DIRNAME, 'fixtures', 'include-file.adoc' + include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.adoc' begin FileUtils.cp include_file, include_file_with_sp - doc = document_from_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME - output = doc.render + doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME + output = doc.convert assert_match(/included content/, output) ensure FileUtils.rm include_file_with_sp @@ -523,16 +650,13 @@ end test 'include directive should resolve file with {sp} in name' do - input = <<-EOS -include::fixtures/include{sp}file.asciidoc[] - EOS - - include_file = File.join DIRNAME, 'fixtures', 'include-file.asciidoc' - include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.asciidoc' + input = 'include::fixtures/include{sp}file.adoc[]' + include_file = File.join DIRNAME, 'fixtures', 'include-file.adoc' + include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.adoc' begin FileUtils.cp include_file, include_file_with_sp - doc = document_from_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME - output = doc.render + doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME + output = doc.convert assert_match(/included content/, output) ensure FileUtils.rm include_file_with_sp @@ -540,18 +664,15 @@ end test 'include directive should resolve file relative to current include' do - input = <<-EOS -include::fixtures/parent-include.adoc[] - EOS - + input = 'include::fixtures/parent-include.adoc[]' pseudo_docfile = File.join DIRNAME, 'include-master.adoc' fixtures_dir = File.join DIRNAME, 'fixtures' parent_include_docfile = File.join fixtures_dir, 'parent-include.adoc' child_include_docfile = File.join fixtures_dir, 'child-include.adoc' grandchild_include_docfile = File.join fixtures_dir, 'grandchild-include.adoc' - doc = empty_safe_document :base_dir => DIRNAME - 
reader = Asciidoctor::PreprocessorReader.new doc, input, pseudo_docfile + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, input, pseudo_docfile, normalize: true assert_equal pseudo_docfile, reader.file assert_equal DIRNAME, reader.dir @@ -559,7 +680,7 @@ assert_equal 'first line of parent', reader.read_line - assert_equal 'fixtures/parent-include.adoc: line 1', reader.prev_line_info + assert_equal 'fixtures/parent-include.adoc: line 1', reader.cursor_at_prev_line.to_s assert_equal parent_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/parent-include.adoc', reader.path @@ -568,7 +689,7 @@ assert_equal 'first line of child', reader.read_line - assert_equal 'fixtures/child-include.adoc: line 1', reader.prev_line_info + assert_equal 'fixtures/child-include.adoc: line 1', reader.cursor_at_prev_line.to_s assert_equal child_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/child-include.adoc', reader.path @@ -577,7 +698,7 @@ assert_equal 'first line of grandchild', reader.read_line - assert_equal 'fixtures/grandchild-include.adoc: line 1', reader.prev_line_info + assert_equal 'fixtures/grandchild-include.adoc: line 1', reader.cursor_at_prev_line.to_s assert_equal grandchild_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/grandchild-include.adoc', reader.path @@ -594,24 +715,74 @@ assert_equal 'last line of parent', reader.read_line - assert_equal 'fixtures/parent-include.adoc: line 5', reader.prev_line_info + assert_equal 'fixtures/parent-include.adoc: line 5', reader.cursor_at_prev_line.to_s assert_equal parent_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/parent-include.adoc', reader.path end + test 'include directive should process lines when file extension of target is .asciidoc' do + input = 'include::fixtures/include-alt-extension.asciidoc[]' + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + assert_equal 3, doc.blocks.size + assert_equal ['first line'], doc.blocks[0].lines + assert_equal ['Asciidoctor!'], doc.blocks[1].lines + assert_equal ['last line'], doc.blocks[2].lines + end + + test 'unresolved target referenced by include directive is skipped when optional option is set' do + input = <<~'EOS' + include::fixtures/{no-such-file}[opts=optional] + + trailing content + EOS + + begin + using_memory_logger do |logger| + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + assert_equal 1, doc.blocks.size + assert_equal ['trailing content'], doc.blocks[0].lines + assert_message logger, :INFO, '~: line 1: optional include dropped because include file not found', Hash + end + rescue + flunk 'include directive should not raise exception on unresolved target' + end + end + + test 'missing file referenced by include directive is skipped when optional option is set' do + input = <<~'EOS' + include::fixtures/no-such-file.adoc[opts=optional] + + trailing content + EOS + + begin + using_memory_logger do |logger| + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + assert_equal 1, doc.blocks.size + assert_equal ['trailing content'], doc.blocks[0].lines + assert_message logger, :INFO, '~: line 1: optional include dropped because include file not found', Hash + end + rescue + flunk 'include directive should not raise exception on missing file' + end + end + test 'missing file referenced by include directive is replaced by warning' do - input = 
<<-EOS -include::fixtures/no-such-file.adoc[] + input = <<~'EOS' + include::fixtures/no-such-file.adoc[] -trailing content + trailing content EOS begin - doc = document_from_string input, :safe => :safe, :base_dir => DIRNAME - assert_equal 2, doc.blocks.size - assert_equal ['Unresolved directive in - include::fixtures/no-such-file.adoc[]'], doc.blocks[0].lines - assert_equal ['trailing content'], doc.blocks[1].lines + using_memory_logger do |logger| + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + assert_equal 2, doc.blocks.size + assert_equal ['Unresolved directive in - include::fixtures/no-such-file.adoc[]'], doc.blocks[0].lines + assert_equal ['trailing content'], doc.blocks[1].lines + assert_message logger, :ERROR, '~: line 1: include file not found', Hash + end rescue flunk 'include directive should not raise exception on missing file' end @@ -620,17 +791,20 @@ test 'unreadable file referenced by include directive is replaced by warning' do include_file = File.join DIRNAME, 'fixtures', 'chapter-a.adoc' FileUtils.chmod 0000, include_file - input = <<-EOS -include::fixtures/chapter-a.adoc[] + input = <<~'EOS' + include::fixtures/chapter-a.adoc[] -trailing content + trailing content EOS begin - doc = document_from_string input, :safe => :safe, :base_dir => DIRNAME - assert_equal 2, doc.blocks.size - assert_equal ['Unresolved directive in - include::fixtures/chapter-a.adoc[]'], doc.blocks[0].lines - assert_equal ['trailing content'], doc.blocks[1].lines + using_memory_logger do |logger| + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + assert_equal 2, doc.blocks.size + assert_equal ['Unresolved directive in - include::fixtures/chapter-a.adoc[]'], doc.blocks[0].lines + assert_equal ['trailing content'], doc.blocks[1].lines + assert_message logger, :ERROR, '~: line 1: include file not readable', Hash + end rescue flunk 'include directive should not raise exception on missing file' ensure @@ -641,58 +815,148 @@ # IMPORTANT this test needs to be run on Windows to verify proper behavior in Windows test 'can resolve include directive with absolute path' do include_path = ::File.join DIRNAME, 'fixtures', 'chapter-a.adoc' - input = <<-EOS -include::#{include_path}[] - EOS - result = document_from_string input, :safe => :safe + input = %(include::#{include_path}[]) + result = document_from_string input, safe: :safe assert_equal 'Chapter A', result.doctitle - result = document_from_string input, :safe => :unsafe, :base_dir => ::Dir.tmpdir + result = document_from_string input, safe: :unsafe, base_dir: ::Dir.tmpdir assert_equal 'Chapter A', result.doctitle end test 'include directive can retrieve data from uri' do - #url = 'http://echo.jsontest.com/name/asciidoctor' url = %(http://#{resolve_localhost}:9876/name/asciidoctor) - input = <<-EOS -.... -include::#{url}[] -.... + input = <<~EOS + .... + include::#{url}[] + .... EOS expect = /\{"name": "asciidoctor"\}/ output = using_test_webserver do - render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end refute_nil output assert_match(expect, output) end + test 'nested include directives are resolved relative to current file' do + input = <<~'EOS' + .... + include::fixtures/outer-include.adoc[] + .... 
+ EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + expected = <<~'EOS'.chop + first line of outer + + first line of middle + + first line of inner + + last line of inner + + last line of middle + + last line of outer + EOS + assert_includes output, expected + end + + test 'nested remote include directive is resolved relative to uri of current file' do + url = %(http://#{resolve_localhost}:9876/fixtures/outer-include.adoc) + input = <<~EOS + .... + include::#{url}[] + .... + EOS + output = using_test_webserver do + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } + end + + expected = <<~'EOS'.chop + first line of outer + + first line of middle + + first line of inner + + last line of inner + + last line of middle + + last line of outer + EOS + assert_includes output, expected + end + + test 'nested remote include directive that cannot be resolved does not crash processor' do + include_url = %(http://#{resolve_localhost}:9876/fixtures/file-with-missing-include.adoc) + nested_include_url = 'no-such-file.adoc' + input = <<~EOS + .... + include::#{include_url}[] + .... + EOS + begin + using_memory_logger do |logger| + result = using_test_webserver do + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } + end + assert_includes result, %(Unresolved directive in #{include_url} - include::#{nested_include_url}[]) + assert_message logger, :ERROR, %(#{include_url}: line 1: include uri not readable: http://#{resolve_localhost}:9876/fixtures/#{nested_include_url}), Hash + end + rescue + flunk 'include directive should not raise exception on missing file' + end + end + + test 'tag filtering is supported for remote includes' do + url = %(http://#{resolve_localhost}:9876/fixtures/tagged-class.rb) + input = <<~EOS + [source,ruby] + ---- + include::#{url}[tag=init,indent=0] + ---- + EOS + output = using_test_webserver do + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } + end + + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + def initialize breed + @breed = breed + end + EOS + assert_includes output, expected + end + test 'inaccessible uri referenced by include directive does not crash processor' do url = %(http://#{resolve_localhost}:9876/no_such_file) - input = <<-EOS -.... -include::#{url}[] -.... + input = <<~EOS + .... + include::#{url}[] + .... 
EOS - output = begin - using_test_webserver do - render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} + begin + using_memory_logger do |logger| + output = using_test_webserver do + convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } + end + refute_nil output + assert_match(/Unresolved directive/, output) + assert_message logger, :ERROR, %(: line 2: include uri not readable: #{url}), Hash end rescue flunk 'include directive should not raise exception on inaccessible uri' end - refute_nil output - assert_match(/Unresolved directive/, output) end - test 'include directive supports line selection' do - input = <<-EOS -include::fixtures/include-file.asciidoc[lines=1;3..4;6..-1] - EOS - - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME + test 'include directive supports selecting lines by line number' do + input = 'include::fixtures/include-file.adoc[lines=1;3..4;6..-1]' + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) @@ -704,12 +968,9 @@ assert_match(/last line of included content/, output) end - test 'include directive supports line selection using quoted attribute value' do - input = <<-EOS -include::fixtures/include-file.asciidoc[lines="1, 3..4 , 6 .. -1"] - EOS - - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME + test 'include directive supports line ranges specified in quoted attribute value' do + input = 'include::fixtures/include-file.adoc[lines="1, 3..4 , 6 .. -1"]' + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) @@ -721,106 +982,386 @@ assert_match(/last line of included content/, output) end - test 'include directive supports tagged selection' do - input = <<-EOS -include::fixtures/include-file.asciidoc[tag=snippetA] + test 'include directive supports implicit endless range' do + input = 'include::fixtures/include-file.adoc[lines=6..]' + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + refute_match(/first line/, output) + refute_match(/second line/, output) + refute_match(/third line/, output) + refute_match(/fourth line/, output) + refute_match(/fifth line/, output) + assert_match(/sixth line/, output) + assert_match(/seventh line/, output) + assert_match(/eighth line/, output) + assert_match(/last line of included content/, output) + end + + test 'include directive ignores empty lines attribute' do + input = <<~'EOS' + ++++ + include::fixtures/include-file.adoc[lines=] + ++++ EOS - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_includes output, 'first line of included content' + assert_includes output, 'last line of included content' + end + + test 'include directive supports selecting lines by tag' do + input = 'include::fixtures/include-file.adoc[tag=snippetA]' + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/snippetA content/, output) refute_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end - test 'include directive supports multiple tagged selection' do - input = <<-EOS 
-include::fixtures/include-file.asciidoc[tags=snippetA;snippetB] - EOS - - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME + test 'include directive supports selecting lines by tags' do + input = 'include::fixtures/include-file.adoc[tags=snippetA;snippetB]' + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/snippetA content/, output) assert_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end - test 'include directive supports tagged selection in XML file' do - input = <<-EOS -[source,xml,indent=0] ----- -include::fixtures/include-file.xml[tag=snippet] ----- + test 'include directive supports selecting lines by tag in language that uses circumfix comments' do + { + 'include-file.xml' => 'content', + 'include-file.ml' => 'let s = SS.empty;;', + 'include-file.jsx' => '
<p>
  Welcome to the club.
</p>
    ', + }.each do |filename, expect| + input = <<~EOS + [source,xml] + ---- + include::fixtures/#{filename}[tag=snippet,indent=0] + ---- + EOS + + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + assert_equal expect, doc.blocks[0].source + end + end + + test 'include directive supports selecting tagged lines in file that has CRLF line endings' do + begin + tmp_include = Tempfile.new %w(include- .adoc) + tmp_include_dir, tmp_include_path = File.split tmp_include.path + tmp_include.write %(do not include\r\ntag::include-me[]\r\nincluded line\r\nend::include-me[]\r\ndo not include\r\n) + tmp_include.close + input = %(include::#{tmp_include_path}[tag=include-me]) + output = convert_string_to_embedded input, safe: :safe, base_dir: tmp_include_dir + assert_includes output, 'included line' + refute_includes output, 'do not include' + ensure + tmp_include.close! + end + end + + test 'include directive finds closing tag on last line of file without a trailing newline' do + begin + tmp_include = Tempfile.new %w(include- .adoc) + tmp_include_dir, tmp_include_path = File.split tmp_include.path + tmp_include.write %(line not included\ntag::include-me[]\nline included\nend::include-me[]) + tmp_include.close + input = %(include::#{tmp_include_path}[tag=include-me]) + using_memory_logger do |logger| + output = convert_string_to_embedded input, safe: :safe, base_dir: tmp_include_dir + assert_empty logger.messages + assert_includes output, 'line included' + refute_includes output, 'line not included' + end + ensure + tmp_include.close! + end + end + + test 'include directive does not select lines with tag directives within selected tag region' do + input = <<~'EOS' + ++++ + include::fixtures/include-file.adoc[tags=snippet] + ++++ + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + expected = <<~'EOS'.chop + snippetA content + + non-tagged content + + snippetB content + EOS + assert_equal expected, output + end + + test 'include directive skips lines marked with negated tags' do + input = <<~'EOS' + ---- + include::fixtures/tagged-class-enclosed.rb[tags=all;!bark] + ---- + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + class Dog + def initialize breed + @breed = breed + end + end + EOS + assert_includes output, expected + end + + test 'include directive takes all lines without tag directives when value is double asterisk' do + input = <<~'EOS' + ---- + include::fixtures/tagged-class.rb[tags=**] + ---- + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + class Dog + def initialize breed + @breed = breed + end + + def bark + if @breed == 'beagle' + 'woof woof woof woof woof' + else + 'woof woof' + end + end + end EOS + assert_includes output, expected + end - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME - assert_match('<snippet>content</snippet>', output) - refute_match('root', output) + test 'include directive takes all lines except negated tags when value contains double asterisk' do + input = <<~'EOS' + ---- + include::fixtures/tagged-class.rb[tags=**;!bark] + ---- + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + # NOTE cannot use single-quoted heredoc because of 
https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + class Dog + def initialize breed + @breed = breed + end + end + EOS + assert_includes output, expected end - test 'include directive does not select tagged lines inside tagged selection' do - input = <<-EOS -++++ -include::fixtures/include-file.asciidoc[tags=snippet] -++++ + test 'include directive selects lines for all tags when value of tags attribute is wildcard' do + input = <<~'EOS' + ---- + include::fixtures/tagged-class-enclosed.rb[tags=*] + ---- + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + class Dog + def initialize breed + @breed = breed + end + + def bark + if @breed == 'beagle' + 'woof woof woof woof woof' + else + 'woof woof' + end + end + end EOS + assert_includes output, expected + end - output = render_embedded_string input, :safe => :safe, :base_dir => DIRNAME - expect = %(snippetA content + test 'include directive selects lines for all tags except exclusions when value of tags attribute is wildcard' do + input = <<~'EOS' + ---- + include::fixtures/tagged-class-enclosed.rb[tags=*;!init] + ---- + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + class Dog + + def bark + if @breed == 'beagle' + 'woof woof woof woof woof' + else + 'woof woof' + end + end + end + EOS + assert_includes output, expected + end -non-tagged content + test 'include directive skips lines all tagged lines when value of tags attribute is negated wildcard' do + input = <<~'EOS' + ---- + include::fixtures/tagged-class.rb[tags=!*] + ---- + EOS -snippetB content) - assert_equal expect, output + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + expected = %(class Dog\nend) + assert_includes output, expected end - test 'should warn if tag is not found in include file' do - input = <<-EOS -include::fixtures/include-file.asciidoc[tag=snippetZ] + test 'include directive selects specified tagged lines and ignores the other tag directives' do + input = <<~'EOS' + [indent=0] + ---- + include::fixtures/tagged-class.rb[tags=bark;!bark-other] + ---- EOS - old_stderr = $stderr - $stderr = StringIO.new - begin - render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME - warning = $stderr.tap(&:rewind).read - refute_nil warning - assert_match(/WARNING.*snippetZ/, warning) - ensure - $stderr = old_stderr + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + expected = <<~EOS.chop + def bark + if @breed == 'beagle' + 'woof woof woof woof woof' + end end + EOS + assert_includes output, expected end - test 'lines attribute takes precedence over tags attribute in include directive' do - input = <<-EOS -include::fixtures/include-file.asciidoc[lines=1, tags=snippetA;snippetB] + test 'should warn if specified tag is not found in include file' do + input = 'include::fixtures/include-file.adoc[tag=no-such-tag]' + using_memory_logger do |logger| + convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_message logger, :WARN, %(~: line 1: tag 'no-such-tag' not found in include file), Hash + end + end + + test 'should warn if specified tags are not found in include 
file' do + input = <<~'EOS' + ++++ + include::fixtures/include-file.adoc[tags=no-such-tag-b;no-such-tag-a] + ++++ + EOS + + using_memory_logger do |logger| + convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + expected_tags = 'no-such-tag-b, no-such-tag-a' + assert_message logger, :WARN, %(~: line 2: tags '#{expected_tags}' not found in include file), Hash + end + end + + test 'should warn if specified tag in include file is not closed' do + input = <<~'EOS' + ++++ + include::fixtures/unclosed-tag.adoc[tag=a] + ++++ + EOS + + using_memory_logger do |logger| + result = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_equal 'a', result + assert_message logger, :WARN, %(~: line 2: detected unclosed tag 'a' starting at line 2 of include file), Hash + refute_nil logger.messages[0][:message][:include_location] + end + end + + test 'should warn if end tag in included file is mismatched' do + input = <<~'EOS' + ++++ + include::fixtures/mismatched-end-tag.adoc[tags=a;b] + ++++ EOS - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME + inc_path = File.join DIRNAME, 'fixtures/mismatched-end-tag.adoc' + using_memory_logger do |logger| + result = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_equal %(a\nb), result + assert_message logger, :WARN, %(: line 2: mismatched end tag (expected 'b' but found 'a') at line 5 of include file: #{inc_path}), Hash + refute_nil logger.messages[0][:message][:include_location] + end + end + + test 'should warn if unexpected end tag is found in included file' do + input = <<~'EOS' + ++++ + include::fixtures/unexpected-end-tag.adoc[tags=a] + ++++ + EOS + + inc_path = File.join DIRNAME, 'fixtures/unexpected-end-tag.adoc' + using_memory_logger do |logger| + result = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_equal 'a', result + assert_message logger, :WARN, %(: line 2: unexpected end tag 'a' at line 4 of include file: #{inc_path}), Hash + refute_nil logger.messages[0][:message][:include_location] + end + end + + test 'include directive ignores tags attribute when empty' do + ['tag', 'tags'].each do |attr_name| + input = <<~EOS + ++++ + include::fixtures/include-file.xml[#{attr_name}=] + ++++ + EOS + + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_match(/(?:tag|end)::/, output, 2) + end + end + + test 'lines attribute takes precedence over tags attribute in include directive' do + input = 'include::fixtures/include-file.adoc[lines=1, tags=snippetA;snippetB]' + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line of included content/, output) refute_match(/snippetA content/, output) refute_match(/snippetB content/, output) end test 'indent of included file can be reset to size of indent attribute' do - input = <<-EOS -[source, xml] ----- -include::fixtures/basic-docinfo.xml[lines=2..3, indent=0] ----- + input = <<~'EOS' + [source, xml] + ---- + include::fixtures/basic-docinfo.xml[lines=2..3, indent=0] + ---- EOS - output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal "2013\nAcme™, Inc.", result end - test 'should fall back to built-in include directive behavior when not handled by include processor' do - input = <<-EOS -include::fixtures/include-file.asciidoc[] + test 'should 
substitute attribute references in attrlist' do + input = <<~'EOS' + :name-of-tag: snippetA + include::fixtures/include-file.adoc[tag={name-of-tag}] EOS - include_processor = Class.new { + output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME + assert_match(/snippetA content/, output) + refute_match(/snippetB content/, output) + refute_match(/non-tagged content/, output) + refute_match(/included content/, output) + end + + test 'should fall back to built-in include directive behavior when not handled by include processor' do + input = 'include::fixtures/include-file.adoc[]' + include_processor = Class.new do def initialize document end @@ -831,204 +1372,210 @@ def process reader, target, attributes raise 'TestIncludeHandler should not have been invoked' end - } + end - document = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new document, input + document = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new document, input, nil, normalize: true reader.instance_variable_set '@include_processors', [include_processor.new(document)] lines = reader.read_lines - source = lines * ::Asciidoctor::EOL + source = lines * ::Asciidoctor::LF assert_match(/included content/, source) end test 'leveloffset attribute entries should be added to content if leveloffset attribute is specified' do - input = <<-EOS -include::fixtures/master.adoc[] - EOS - - expected = <<-EOS.chomp.split(::Asciidoctor::EOL) -= Master Document + input = 'include::fixtures/master.adoc[]' + expected = <<~'EOS'.split ::Asciidoctor::LF + = Master Document -preamble + preamble -:leveloffset: +1 + :leveloffset: +1 -= Chapter A + = Chapter A -content + content -:leveloffset!: + :leveloffset!: EOS - document = Asciidoctor.load input, :safe => :safe, :base_dir => DIRNAME, :parse => false + document = Asciidoctor.load input, safe: :safe, base_dir: DIRNAME, parse: false assert_equal expected, document.reader.read_lines end test 'attributes are substituted in target of include directive' do - input = <<-EOS -:fixturesdir: fixtures -:ext: asciidoc + input = <<~'EOS' + :fixturesdir: fixtures + :ext: adoc -include::{fixturesdir}/include-file.{ext}[] + include::{fixturesdir}/include-file.{ext}[] EOS - doc = document_from_string input, :safe => :safe, :base_dir => DIRNAME - output = doc.render + doc = document_from_string input, safe: :safe, base_dir: DIRNAME + output = doc.convert assert_match(/included content/, output) end test 'line is skipped by default if target of include directive resolves to empty' do - input = <<-EOS -include::{foodir}/include-file.asciidoc[] - EOS - - doc = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new doc, input - assert_equal 'Unresolved directive in - include::{foodir}/include-file.asciidoc[]', reader.read_line + input = 'include::{blank}[]' + using_memory_logger do |logger| + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true + line = reader.read_line + assert_equal 'Unresolved directive in - include::{blank}[]', line + assert_message logger, :WARN, ': line 1: include dropped because resolved target is blank: include::{blank}[]', Hash + end end - test 'line is dropped if target of include directive resolves to empty and attribute-missing attribute is not skip' do - input = <<-EOS -include::{foodir}/include-file.asciidoc[] - EOS - - doc = empty_safe_document :base_dir => DIRNAME, :attributes => {'attribute-missing' 
=> 'drop'} - reader = Asciidoctor::PreprocessorReader.new doc, input - assert_nil reader.read_line + test 'include is dropped if target contains missing attribute and attribute-missing is drop-line' do + input = 'include::{foodir}/include-file.adoc[]' + using_memory_logger Logger::INFO do |logger| + doc = empty_safe_document base_dir: DIRNAME, attributes: { 'attribute-missing' => 'drop-line' } + reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true + line = reader.read_line + assert_nil line + assert_messages logger, [ + [:INFO, 'dropping line containing reference to missing attribute: foodir'], + [:INFO, ': line 1: include dropped due to missing attribute: include::{foodir}/include-file.adoc[]', Hash], + ] + end end test 'line following dropped include is not dropped' do - input = <<-EOS -include::{foodir}/include-file.asciidoc[] -yo - EOS - - doc = empty_safe_document :base_dir => DIRNAME, :attributes => {'attribute-missing' => 'drop'} - reader = Asciidoctor::PreprocessorReader.new doc, input - assert_equal 'yo', reader.read_line + input = <<~'EOS' + include::{foodir}/include-file.adoc[] + yo + EOS + + using_memory_logger do |logger| + doc = empty_safe_document base_dir: DIRNAME, attributes: { 'attribute-missing' => 'warn' } + reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true + line = reader.read_line + assert_equal 'Unresolved directive in - include::{foodir}/include-file.adoc[]', line + line = reader.read_line + assert_equal 'yo', line + assert_messages logger, [ + [:INFO, 'dropping line containing reference to missing attribute: foodir'], + [:WARN, ': line 1: include dropped due to missing attribute: include::{foodir}/include-file.adoc[]', Hash], + ] + end end test 'escaped include directive is left unprocessed' do - input = <<-EOS -\\include::fixtures/include-file.asciidoc[] -\\escape preserved here + input = <<~'EOS' + \include::fixtures/include-file.adoc[] + \escape preserved here EOS - doc = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new doc, input + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true # we should be able to peek it multiple times and still have the backslash preserved # this is the test for @unescape_next_line - assert_equal 'include::fixtures/include-file.asciidoc[]', reader.peek_line - assert_equal 'include::fixtures/include-file.asciidoc[]', reader.peek_line - assert_equal 'include::fixtures/include-file.asciidoc[]', reader.read_line + assert_equal 'include::fixtures/include-file.adoc[]', reader.peek_line + assert_equal 'include::fixtures/include-file.adoc[]', reader.peek_line + assert_equal 'include::fixtures/include-file.adoc[]', reader.read_line assert_equal '\\escape preserved here', reader.read_line end test 'include directive not at start of line is ignored' do - input = <<-EOS - include::include-file.asciidoc[] - EOS + input = ' include::include-file.adoc[]' para = block_from_string input assert_equal 1, para.lines.size # NOTE the space gets stripped because the line is treated as an inline literal assert_equal :literal, para.context - assert_equal 'include::include-file.asciidoc[]', para.source + assert_equal 'include::include-file.adoc[]', para.source end test 'include directive is disabled when max-include-depth attribute is 0' do - input = <<-EOS -include::include-file.asciidoc[] - EOS - para = block_from_string input, :safe => :safe, :attributes => { 'max-include-depth' => 0 } + input = 
'include::include-file.adoc[]' + para = block_from_string input, safe: :safe, attributes: { 'max-include-depth' => 0 } assert_equal 1, para.lines.size - assert_equal 'include::include-file.asciidoc[]', para.source + assert_equal 'include::include-file.adoc[]', para.source end test 'max-include-depth cannot be set by document' do - input = <<-EOS -:max-include-depth: 1 + input = <<~'EOS' + :max-include-depth: 1 -include::include-file.asciidoc[] + include::include-file.adoc[] EOS - para = block_from_string input, :safe => :safe, :attributes => { 'max-include-depth' => 0 } + para = block_from_string input, safe: :safe, attributes: { 'max-include-depth' => 0 } assert_equal 1, para.lines.size - assert_equal 'include::include-file.asciidoc[]', para.source + assert_equal 'include::include-file.adoc[]', para.source end test 'include directive should be disabled if max include depth has been exceeded' do - input = <<-EOS -include::fixtures/parent-include.adoc[depth=1] - EOS - - pseudo_docfile = File.join DIRNAME, 'include-master.adoc' - - doc = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile) - - lines = reader.readlines - assert lines.include?('include::child-include.adoc[]') + input = 'include::fixtures/parent-include.adoc[depth=1]' + using_memory_logger do |logger| + pseudo_docfile = File.join DIRNAME, 'include-master.adoc' + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile), normalize: true + lines = reader.readlines + assert_includes lines, 'include::grandchild-include.adoc[]' + assert_message logger, :ERROR, 'fixtures/child-include.adoc: line 3: maximum include depth of 1 exceeded', Hash + end end test 'include directive should be disabled if max include depth set in nested context has been exceeded' do - input = <<-EOS -include::fixtures/parent-include-restricted.adoc[depth=3] - EOS - - pseudo_docfile = File.join DIRNAME, 'include-master.adoc' - - doc = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile) - - lines = reader.readlines - assert lines.include?('first line of child') - assert lines.include?('include::grandchild-include.adoc[]') + input = 'include::fixtures/parent-include-restricted.adoc[depth=3]' + using_memory_logger do |logger| + pseudo_docfile = File.join DIRNAME, 'include-master.adoc' + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile), normalize: true + lines = reader.readlines + assert_includes lines, 'first line of child' + assert_includes lines, 'include::grandchild-include.adoc[]' + assert_message logger, :ERROR, 'fixtures/child-include.adoc: line 3: maximum include depth of 0 exceeded', Hash + end end test 'read_lines_until should not process lines if process option is false' do - lines = <<-EOS.each_line.to_a -//// -include::fixtures/no-such-file.adoc[] -//// + lines = <<~'EOS'.lines + //// + include::fixtures/no-such-file.adoc[] + //// EOS - doc = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new doc, lines + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true reader.read_line - result = reader.read_lines_until(:terminator => '////', :skip_processing => true) - 
assert_equal lines.map {|l| l.chomp}[1..1], result + result = reader.read_lines_until(terminator: '////', skip_processing: true) + assert_equal lines.map(&:chomp)[1..1], result end test 'skip_comment_lines should not process lines read' do - lines = <<-EOS.each_line.to_a -//// -include::fixtures/no-such-file.adoc[] -//// + lines = <<~'EOS'.lines + //// + include::fixtures/no-such-file.adoc[] + //// EOS - doc = empty_safe_document :base_dir => DIRNAME - reader = Asciidoctor::PreprocessorReader.new doc, lines - result = reader.skip_comment_lines - assert_equal lines.map {|l| l.chomp}, result + using_memory_logger do |logger| + doc = empty_safe_document base_dir: DIRNAME + reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true + reader.skip_comment_lines + assert reader.empty? + assert logger.empty? + end end end context 'Conditional Inclusions' do test 'process_line returns nil if cursor advanced' do - input = <<-EOS -ifdef::asciidoctor[] -Asciidoctor! -endif::asciidoctor[] + input = <<~'EOS' + ifdef::asciidoctor[] + Asciidoctor! + endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader - assert_nil reader.process_line(reader.lines.first) + assert_nil reader.send :process_line, reader.lines.first end test 'peek_line advances cursor to next conditional line of content' do - input = <<-EOS -ifdef::asciidoctor[] -Asciidoctor! -endif::asciidoctor[] + input = <<~'EOS' + ifdef::asciidoctor[] + Asciidoctor! + endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input @@ -1038,25 +1585,59 @@ assert_equal 2, reader.lineno end + test 'peek_lines should preprocess lines if direct is false' do + input = <<~'EOS' + The Asciidoctor + ifdef::asciidoctor[is in.] + EOS + doc = Asciidoctor::Document.new input + reader = doc.reader + result = reader.peek_lines 2, false + assert_equal ['The Asciidoctor', 'is in.'], result + end + + test 'peek_lines should not preprocess lines if direct is true' do + input = <<~'EOS' + The Asciidoctor + ifdef::asciidoctor[is in.] + EOS + doc = Asciidoctor::Document.new input + reader = doc.reader + result = reader.peek_lines 2, true + assert_equal ['The Asciidoctor', 'ifdef::asciidoctor[is in.]'], result + end + + test 'peek_lines should not prevent subsequent preprocessing of peeked lines' do + input = <<~'EOS' + The Asciidoctor + ifdef::asciidoctor[is in.] + EOS + doc = Asciidoctor::Document.new input + reader = doc.reader + result = reader.peek_lines 2, true + result = reader.peek_lines 2, false + assert_equal ['The Asciidoctor', 'is in.'], result + end + test 'process_line returns line if cursor not advanced' do - input = <<-EOS -content -ifdef::asciidoctor[] -Asciidoctor! -endif::asciidoctor[] + input = <<~'EOS' + content + ifdef::asciidoctor[] + Asciidoctor! + endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader - refute_nil reader.process_line(reader.lines.first) + refute_nil reader.send :process_line, reader.lines.first end test 'peek_line does not advance cursor when on a regular content line' do - input = <<-EOS -content -ifdef::asciidoctor[] -Asciidoctor! -endif::asciidoctor[] + input = <<~'EOS' + content + ifdef::asciidoctor[] + Asciidoctor! 
+ endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input @@ -1067,10 +1648,10 @@ end test 'peek_line returns nil if cursor advances past end of source' do - input = <<-EOS -ifdef::foobar[] -swallowed content -endif::foobar[] + input = <<~'EOS' + ifdef::foobar[] + swallowed content + endif::foobar[] EOS doc = Asciidoctor::Document.new input @@ -1081,188 +1662,199 @@ end test 'ifdef with defined attribute includes content' do - input = <<-EOS -ifdef::holygrail[] -There is a holy grail! -endif::holygrail[] + input = <<~'EOS' + ifdef::holygrail[] + There is a holy grail! + endif::holygrail[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'There is a holy grail!', (lines * ::Asciidoctor::EOL) + assert_equal 'There is a holy grail!', (lines * ::Asciidoctor::LF) end test 'ifdef with defined attribute includes text in brackets' do - input = <<-EOS -On our quest we go... -ifdef::holygrail[There is a holy grail!] -There was much rejoicing. + input = <<~'EOS' + On our quest we go... + ifdef::holygrail[There is a holy grail!] + There was much rejoicing. EOS - doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal "On our quest we go...\nThere is a holy grail!\nThere was much rejoicing.", (lines * ::Asciidoctor::EOL) + assert_equal "On our quest we go...\nThere is a holy grail!\nThere was much rejoicing.", (lines * ::Asciidoctor::LF) + end + + test 'ifdef with defined attribute processes include directive in brackets' do + input = 'ifdef::asciidoctor-version[include::fixtures/include-file.adoc[tag=snippetA]]' + doc = Asciidoctor::Document.new input, safe: :safe, base_dir: DIRNAME + reader = doc.reader + lines = [] + while reader.has_more_lines? + lines << reader.read_line + end + assert_equal 'snippetA content', lines[0] end test 'ifdef attribute name is not case sensitive' do - input = <<-EOS -ifdef::showScript[] -The script is shown! -endif::showScript[] + input = <<~'EOS' + ifdef::showScript[] + The script is shown! + endif::showScript[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'showscript' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'showscript' => '' } result = doc.reader.read assert_equal 'The script is shown!', result end test 'ifndef with defined attribute does not include text in brackets' do - input = <<-EOS -On our quest we go... -ifndef::hardships[There is a holy grail!] -There was no rejoicing. + input = <<~'EOS' + On our quest we go... + ifndef::hardships[There is a holy grail!] + There was no rejoicing. EOS - doc = Asciidoctor::Document.new input, :attributes => { 'hardships' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'hardships' => '' } reader = doc.reader lines = [] while reader.has_more_lines? 
lines << reader.read_line end - assert_equal "On our quest we go...\nThere was no rejoicing.", (lines * ::Asciidoctor::EOL) + assert_equal "On our quest we go...\nThere was no rejoicing.", (lines * ::Asciidoctor::LF) end test 'include with non-matching nested exclude' do - input = <<-EOS -ifdef::grail[] -holy -ifdef::swallow[] -swallow -endif::swallow[] -grail -endif::grail[] + input = <<~'EOS' + ifdef::grail[] + holy + ifdef::swallow[] + swallow + endif::swallow[] + grail + endif::grail[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal "holy\ngrail", (lines * ::Asciidoctor::EOL) + assert_equal "holy\ngrail", (lines * ::Asciidoctor::LF) end test 'nested excludes with same condition' do - input = <<-EOS -ifndef::grail[] -ifndef::grail[] -not here -endif::grail[] -endif::grail[] + input = <<~'EOS' + ifndef::grail[] + ifndef::grail[] + not here + endif::grail[] + endif::grail[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal '', (lines * ::Asciidoctor::EOL) + assert_equal '', (lines * ::Asciidoctor::LF) end test 'include with nested exclude of inverted condition' do - input = <<-EOS -ifdef::grail[] -holy -ifndef::grail[] -not here -endif::grail[] -grail -endif::grail[] + input = <<~'EOS' + ifdef::grail[] + holy + ifndef::grail[] + not here + endif::grail[] + grail + endif::grail[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal "holy\ngrail", (lines * ::Asciidoctor::EOL) + assert_equal "holy\ngrail", (lines * ::Asciidoctor::LF) end test 'exclude with matching nested exclude' do - input = <<-EOS -poof -ifdef::swallow[] -no -ifdef::swallow[] -swallow -endif::swallow[] -here -endif::swallow[] -gone + input = <<~'EOS' + poof + ifdef::swallow[] + no + ifdef::swallow[] + swallow + endif::swallow[] + here + endif::swallow[] + gone EOS - doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal "poof\ngone", (lines * ::Asciidoctor::EOL) + assert_equal "poof\ngone", (lines * ::Asciidoctor::LF) end test 'exclude with nested include using shorthand end' do - input = <<-EOS -poof -ifndef::grail[] -no grail -ifndef::swallow[] -or swallow -endif::[] -in here -endif::[] -gone + input = <<~'EOS' + poof + ifndef::grail[] + no grail + ifndef::swallow[] + or swallow + endif::[] + in here + endif::[] + gone EOS - doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal "poof\ngone", (lines * ::Asciidoctor::EOL) + assert_equal "poof\ngone", (lines * ::Asciidoctor::LF) end test 'ifdef with one alternative attribute set includes content' do - input = <<-EOS -ifdef::holygrail,swallow[] -Our quest is complete! 
-endif::holygrail,swallow[] + input = <<~'EOS' + ifdef::holygrail,swallow[] + Our quest is complete! + endif::holygrail,swallow[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'swallow' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) + assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::LF) end test 'ifdef with no alternative attributes set does not include content' do - input = <<-EOS -ifdef::holygrail,swallow[] -Our quest is complete! -endif::holygrail,swallow[] + input = <<~'EOS' + ifdef::holygrail,swallow[] + Our quest is complete! + endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input @@ -1271,46 +1863,64 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal '', (lines * ::Asciidoctor::EOL) + assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifdef with all required attributes set includes content' do - input = <<-EOS -ifdef::holygrail+swallow[] -Our quest is complete! -endif::holygrail+swallow[] + input = <<~'EOS' + ifdef::holygrail+swallow[] + Our quest is complete! + endif::holygrail+swallow[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '', 'swallow' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '', 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) + assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::LF) end test 'ifdef with missing required attributes does not include content' do - input = <<-EOS -ifdef::holygrail+swallow[] -Our quest is complete! -endif::holygrail+swallow[] + input = <<~'EOS' + ifdef::holygrail+swallow[] + Our quest is complete! + endif::holygrail+swallow[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '' } + doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal '', (lines * ::Asciidoctor::EOL) + assert_equal '', (lines * ::Asciidoctor::LF) + end + + test 'ifdef should permit leading, trailing, and repeat operators' do + { + 'asciidoctor,' => 'content', + ',asciidoctor' => 'content', + 'asciidoctor+' => '', + '+asciidoctor' => '', + 'asciidoctor,,asciidoctor-version' => 'content', + 'asciidoctor++asciidoctor-version' => '', + }.each do |condition, expected| + input = <<~EOS + ifdef::#{condition}[] + content + endif::[] + EOS + assert_equal expected, (document_from_string input, parse: false).reader.read + end end test 'ifndef with undefined attribute includes block' do - input = <<-EOS -ifndef::holygrail[] -Our quest continues to find the holy grail! -endif::holygrail[] + input = <<~'EOS' + ifndef::holygrail[] + Our quest continues to find the holy grail! + endif::holygrail[] EOS doc = Asciidoctor::Document.new input @@ -1319,78 +1929,121 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal 'Our quest continues to find the holy grail!', (lines * ::Asciidoctor::EOL) + assert_equal 'Our quest continues to find the holy grail!', (lines * ::Asciidoctor::LF) end - test 'ifndef with one alternative attribute set includes content' do - input = <<-EOS -ifndef::holygrail,swallow[] -Our quest is complete! 
-endif::holygrail,swallow[] + test 'ifndef with one alternative attribute set does not include content' do + input = <<~'EOS' + ifndef::holygrail,swallow[] + Our quest is complete! + endif::holygrail,swallow[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'swallow' => '' } - reader = doc.reader - lines = [] - while reader.has_more_lines? - lines << reader.read_line - end - assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) + result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '' }).reader.read + assert_empty result + end + + test 'ifndef with both alternative attributes set does not include content' do + input = <<~'EOS' + ifndef::holygrail,swallow[] + Our quest is complete! + endif::holygrail,swallow[] + EOS + + result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '', 'holygrail' => '' }).reader.read + assert_empty result end test 'ifndef with no alternative attributes set includes content' do - input = <<-EOS -ifndef::holygrail,swallow[] -Our quest is complete! -endif::holygrail,swallow[] + input = <<~'EOS' + ifndef::holygrail,swallow[] + Our quest is complete! + endif::holygrail,swallow[] EOS - doc = Asciidoctor::Document.new input - reader = doc.reader - lines = [] - while reader.has_more_lines? - lines << reader.read_line + result = (Asciidoctor::Document.new input).reader.read + assert_equal 'Our quest is complete!', result + end + + test 'ifndef with no required attributes set includes content' do + input = <<~'EOS' + ifndef::holygrail+swallow[] + Our quest is complete! + endif::holygrail+swallow[] + EOS + + result = (Asciidoctor::Document.new input).reader.read + assert_equal 'Our quest is complete!', result + end + + test 'ifndef with all required attributes set does not include content' do + input = <<~'EOS' + ifndef::holygrail+swallow[] + Our quest is complete! + endif::holygrail+swallow[] + EOS + + result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '', 'holygrail' => '' }).reader.read + assert_empty result + end + + test 'ifndef with at least one required attributes set does not include content' do + input = <<~'EOS' + ifndef::holygrail+swallow[] + Our quest is complete! + endif::holygrail+swallow[] + EOS + + result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '' }).reader.read + assert_equal 'Our quest is complete!', result + end + + test 'should log warning if endif is unmatched' do + input = <<~'EOS' + Our quest is complete! + endif::on-quest[] + EOS + + using_memory_logger do |logger| + result = (Asciidoctor::Document.new input, attributes: { 'on-quest' => '' }).reader.read + assert_equal 'Our quest is complete!', result + assert_message logger, :ERROR, '~: line 2: unmatched preprocessor directive: endif::on-quest[]', Hash end - assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end - test 'ifndef with any required attributes set does not include content' do - input = <<-EOS -ifndef::holygrail+swallow[] -Our quest is complete! -endif::holygrail+swallow[] + test 'should log warning if endif is mismatched' do + input = <<~'EOS' + ifdef::on-quest[] + Our quest is complete! + endif::on-journey[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'swallow' => '' } - reader = doc.reader - lines = [] - while reader.has_more_lines? 
- lines << reader.read_line + using_memory_logger do |logger| + result = (Asciidoctor::Document.new input, attributes: { 'on-quest' => '' }).reader.read + assert_equal 'Our quest is complete!', result + assert_message logger, :ERROR, '~: line 3: mismatched preprocessor directive: endif::on-journey[]', Hash end - assert_equal '', (lines * ::Asciidoctor::EOL) end - test 'ifndef with no required attributes set includes content' do - input = <<-EOS -ifndef::holygrail+swallow[] -Our quest is complete! -endif::holygrail+swallow[] + test 'should log warning if endif contains text' do + input = <<~'EOS' + ifdef::on-quest[] + Our quest is complete! + endif::on-quest[complete!] EOS - doc = Asciidoctor::Document.new input - reader = doc.reader - lines = [] - while reader.has_more_lines? - lines << reader.read_line + using_memory_logger do |logger| + result = (Asciidoctor::Document.new input, attributes: { 'on-quest' => '' }).reader.read + assert_equal 'Our quest is complete!', result + assert_message logger, :ERROR, '~: line 3: malformed preprocessor directive - text not permitted: endif::on-quest[complete!]', Hash end - assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end test 'escaped ifdef is unescaped and ignored' do - input = <<-EOS -\\ifdef::holygrail[] -content -\\endif::holygrail[] + input = <<~'EOS' + \ifdef::holygrail[] + content + \endif::holygrail[] EOS doc = Asciidoctor::Document.new input @@ -1399,14 +2052,14 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal "ifdef::holygrail[]\ncontent\nendif::holygrail[]", (lines * ::Asciidoctor::EOL) + assert_equal "ifdef::holygrail[]\ncontent\nendif::holygrail[]", (lines * ::Asciidoctor::LF) end test 'ifeval comparing missing attribute to nil includes content' do - input = <<-EOS -ifeval::['{foo}' == ''] -No foo for you! -endif::[] + input = <<~'EOS' + ifeval::['{foo}' == ''] + No foo for you! + endif::[] EOS doc = Asciidoctor::Document.new input @@ -1415,14 +2068,14 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal 'No foo for you!', (lines * ::Asciidoctor::EOL) + assert_equal 'No foo for you!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing missing attribute to 0 drops content' do - input = <<-EOS -ifeval::[{leveloffset} == 0] -I didn't make the cut! -endif::[] + input = <<~'EOS' + ifeval::[{leveloffset} == 0] + I didn't make the cut! + endif::[] EOS doc = Asciidoctor::Document.new input @@ -1431,62 +2084,62 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal '', (lines * ::Asciidoctor::EOL) + assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifeval comparing double-quoted attribute to matching string includes content' do - input = <<-EOS -ifeval::["{gem}" == "asciidoctor"] -Asciidoctor it is! -endif::[] + input = <<~'EOS' + ifeval::["{gem}" == "asciidoctor"] + Asciidoctor it is! + endif::[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'gem' => 'asciidoctor' } + doc = Asciidoctor::Document.new input, attributes: { 'gem' => 'asciidoctor' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::EOL) + assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing single-quoted attribute to matching string includes content' do - input = <<-EOS -ifeval::['{gem}' == 'asciidoctor'] -Asciidoctor it is! -endif::[] + input = <<~'EOS' + ifeval::['{gem}' == 'asciidoctor'] + Asciidoctor it is! 
+ endif::[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'gem' => 'asciidoctor' } + doc = Asciidoctor::Document.new input, attributes: { 'gem' => 'asciidoctor' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::EOL) + assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing quoted attribute to non-matching string drops content' do - input = <<-EOS -ifeval::['{gem}' == 'asciidoctor'] -Asciidoctor it is! -endif::[] + input = <<~'EOS' + ifeval::['{gem}' == 'asciidoctor'] + Asciidoctor it is! + endif::[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'gem' => 'tilt' } + doc = Asciidoctor::Document.new input, attributes: { 'gem' => 'tilt' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal '', (lines * ::Asciidoctor::EOL) + assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifeval comparing attribute to lower version number includes content' do - input = <<-EOS -ifeval::['{asciidoctor-version}' >= '0.1.0'] -That version will do! -endif::[] + input = <<~'EOS' + ifeval::['{asciidoctor-version}' >= '0.1.0'] + That version will do! + endif::[] EOS doc = Asciidoctor::Document.new input @@ -1495,14 +2148,14 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal 'That version will do!', (lines * ::Asciidoctor::EOL) + assert_equal 'That version will do!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing attribute to self includes content' do - input = <<-EOS -ifeval::['{asciidoctor-version}' == '{asciidoctor-version}'] -Of course it's the same! -endif::[] + input = <<~'EOS' + ifeval::['{asciidoctor-version}' == '{asciidoctor-version}'] + Of course it's the same! + endif::[] EOS doc = Asciidoctor::Document.new input @@ -1511,14 +2164,14 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal 'Of course it\'s the same!', (lines * ::Asciidoctor::EOL) + assert_equal 'Of course it\'s the same!', (lines * ::Asciidoctor::LF) end test 'ifeval arguments can be transposed' do - input = <<-EOS -ifeval::['0.1.0' <= '{asciidoctor-version}'] -That version will do! -endif::[] + input = <<~'EOS' + ifeval::['0.1.0' <= '{asciidoctor-version}'] + That version will do! + endif::[] EOS doc = Asciidoctor::Document.new input @@ -1527,54 +2180,119 @@ while reader.has_more_lines? lines << reader.read_line end - assert_equal 'That version will do!', (lines * ::Asciidoctor::EOL) + assert_equal 'That version will do!', (lines * ::Asciidoctor::LF) end test 'ifeval matching numeric equality includes content' do - input = <<-EOS -ifeval::[{rings} == 1] -One ring to rule them all! -endif::[] + input = <<~'EOS' + ifeval::[{rings} == 1] + One ring to rule them all! + endif::[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'rings' => '1' } + doc = Asciidoctor::Document.new input, attributes: { 'rings' => '1' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::EOL) + assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::LF) end test 'ifeval matching numeric inequality includes content' do - input = <<-EOS -ifeval::[{rings} != 0] -One ring to rule them all! -endif::[] + input = <<~'EOS' + ifeval::[{rings} != 0] + One ring to rule them all! 
+ endif::[] EOS - doc = Asciidoctor::Document.new input, :attributes => { 'rings' => '1' } + doc = Asciidoctor::Document.new input, attributes: { 'rings' => '1' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end - assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::EOL) + assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::LF) + end + + test 'should warn if ifeval has target' do + input = <<~'EOS' + ifeval::target[1 == 1] + content + EOS + + using_memory_logger do |logger| + doc = Asciidoctor::Document.new input + reader = doc.reader + lines = [] + lines << reader.read_line while reader.has_more_lines? + assert_equal 'content', (lines * ::Asciidoctor::LF) + assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - target not permitted: ifeval::target[1 == 1]', Hash + end + end + + test 'should warn if ifeval has invalid expression' do + input = <<~'EOS' + ifeval::[1 | 2] + content + EOS + + using_memory_logger do |logger| + doc = Asciidoctor::Document.new input + reader = doc.reader + lines = [] + lines << reader.read_line while reader.has_more_lines? + assert_equal 'content', (lines * ::Asciidoctor::LF) + assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - invalid expression: ifeval::[1 | 2]', Hash + end + end + + test 'should warn if ifeval is missing expression' do + input = <<~'EOS' + ifeval::[] + content + EOS + + using_memory_logger do |logger| + doc = Asciidoctor::Document.new input + reader = doc.reader + lines = [] + lines << reader.read_line while reader.has_more_lines? + assert_equal 'content', (lines * ::Asciidoctor::LF) + assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - missing expression: ifeval::[]', Hash + end end test 'ifdef with no target is ignored' do - input = <<-EOS -ifdef::[] -content + input = <<~'EOS' + ifdef::[] + content EOS - doc = Asciidoctor::Document.new input - reader = doc.reader - lines = [] - while reader.has_more_lines? - lines << reader.read_line + using_memory_logger do |logger| + doc = Asciidoctor::Document.new input + reader = doc.reader + lines = [] + lines << reader.read_line while reader.has_more_lines? + assert_equal 'content', (lines * ::Asciidoctor::LF) + assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - missing target: ifdef::[]', Hash + end + end + + test 'should not warn if preprocessor directive is invalid if already skipping' do + input = <<~'EOS' + ifdef::attribute-not-set[] + foo + ifdef::[] + bar + endif::[] + EOS + + using_memory_logger do |logger| + result = (Asciidoctor::Document.new input).reader.read + assert_empty result + assert_empty logger end - assert_equal "ifdef::[]\ncontent", (lines * ::Asciidoctor::EOL) end end end diff -Nru asciidoctor-1.5.5/test/sections_test.rb asciidoctor-2.0.10/test/sections_test.rb --- asciidoctor-1.5.5/test/sections_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/sections_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,8 +1,5 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! 
- require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Sections' do context 'Ids' do @@ -11,21 +8,57 @@ assert_equal '_section_one', sec.id end - test 'synthetic id replaces non-word characters with underscores' do - sec = block_from_string("== We're back!") - assert_equal '_we_re_back', sec.id + test 'duplicate synthetic id is automatically enumerated' do + doc = document_from_string <<~'EOS' + == Section One + + == Section One + EOS + assert_equal 2, doc.blocks.size + assert_equal '_section_one', doc.blocks[0].id + assert_equal '_section_one_2', doc.blocks[1].id + end + + test 'synthetic id removes non-word characters' do + sec = block_from_string("== We’re back!") + assert_equal '_were_back', sec.id end - test 'synthetic id removes repeating underscores' do + test 'synthetic id removes repeating separators' do sec = block_from_string('== Section $ One') assert_equal '_section_one', sec.id end test 'synthetic id removes entities' do - sec = block_from_string('== Ben & Jerry & Company "Ice Cream Brothers" ✾') + sec = block_from_string('== Ben & Jerry & Company¹ "Ice Cream Brothers" あ') assert_equal '_ben_jerry_company_ice_cream_brothers', sec.id end + test 'synthetic id removes adjacent entities with mixed case' do + sec = block_from_string('== a ®&© b') + assert_equal '_a_b', sec.id + end + + test 'synthetic id removes XML tags' do + sec = block_from_string('== Use the `run` command to make it icon:gear[]') + assert_equal '_use_the_run_command_to_make_it_gear', sec.id + end + + test 'synthetic id collapses repeating spaces' do + sec = block_from_string('== Go Far') + assert_equal '_go_far', sec.id + end + + test 'synthetic id replaces hyphens with separator' do + sec = block_from_string('== State-of-the-art design') + assert_equal '_state_of_the_art_design', sec.id + end + + test 'synthetic id replaces dots with separator' do + sec = block_from_string("== Section 1.1.1") + assert_equal '_section_1_1_1', sec.id + end + test 'synthetic id prefix can be customized' do sec = block_from_string(":idprefix: id_\n\n== Section One") assert_equal 'id_section_one', sec.id @@ -37,7 +70,7 @@ end test 'synthetic id prefix is stripped from beginning of id if set to blank' do - sec = block_from_string(":idprefix:\n\n== & More") + sec = block_from_string(":idprefix:\n\n== & ! More") assert_equal 'more', sec.id end @@ -46,6 +79,26 @@ assert_equal '_section-one', sec.id end + test 'synthetic id separator can be hyphen and hyphens are preserved' do + sec = block_from_string(":idseparator: -\n\n== State-of-the-art design") + assert_equal '_state-of-the-art-design', sec.id + end + + test 'synthetic id separator can be dot and dots are preserved' do + sec = block_from_string(":idseparator: .\n\n== Version 5.0.1") + assert_equal '_version.5.0.1', sec.id + end + + test 'synthetic id separator can only be one character' do + input = <<~'EOS' + :idseparator: -=- + + == This Section Is All You Need + EOS + sec = block_from_string input + assert_equal '_this-section-is-all-you-need', sec.id + end + test 'synthetic id separator can be set to blank' do sec = block_from_string(":idseparator:\n\n== Section One") assert_equal '_sectionone', sec.id @@ -63,7 +116,7 @@ test 'synthetic ids can be disabled' do sec = block_from_string(":sectids!:\n\n== Section One\n") - assert sec.id.nil? 
+ assert_nil sec.id end test 'explicit id in anchor above section title overrides synthetic id' do @@ -71,12 +124,43 @@ assert_equal 'one', sec.id end + test 'explicit id in block attributes above section title overrides synthetic id' do + sec = block_from_string("[id=one]\n== Section One") + assert_equal 'one', sec.id + end + + test 'explicit id set using shorthand in style above section title overrides synthetic id' do + sec = block_from_string("[#one]\n== Section One") + assert_equal 'one', sec.id + end + + test 'should use explicit id from last block attribute line above section title that defines an explicit id' do + input = <<~'EOS' + [#un] + [#one] + == Section One + EOS + sec = block_from_string input + assert_equal 'one', sec.id + end + test 'explicit id can be defined using an embedded anchor' do sec = block_from_string("== Section One [[one]] ==") assert_equal 'one', sec.id assert_equal 'Section One', sec.title end + test 'explicit id can be defined using an embedded anchor when using setext section titles' do + input = <<~'EOS' + Section Title [[refid,reftext]] + ------------------------------- + EOS + sec = block_from_string input + assert_equal 'Section Title', sec.title + assert_equal 'refid', sec.id + assert_equal 'reftext', (sec.attr 'reftext') + end + test 'explicit id can be defined using an embedded anchor with reftext' do sec = block_from_string("== Section One [[one,Section Uno]] ==") assert_equal 'one', sec.id @@ -104,20 +188,26 @@ assert_equal 'Section One [[one]]', sec.title end + test 'should not process inline anchor in section title if section has explicit ID' do + sec = block_from_string(%([#sect-one]\n== Section One [[one]])) + assert_equal 'sect-one', sec.id + assert_equal 'Section One ', sec.title + end + test 'title substitutions are applied before generating id' do sec = block_from_string("== Section{sp}One\n") assert_equal '_section_one', sec.id end test 'synthetic ids are unique' do - input = <<-EOS -== Some section + input = <<~'EOS' + == Some section -text + text -== Some section + == Some section -text + text EOS doc = document_from_string input assert_equal '_some_section', doc.blocks[0].id @@ -128,14 +218,14 @@ test 'can set start index of synthetic ids' do old_unique_id_start_index = Asciidoctor::Compliance.unique_id_start_index begin - input = <<-EOS -== Some section + input = <<~'EOS' + == Some section -text + text -== Some section + == Some section -text + text EOS Asciidoctor::Compliance.unique_id_start_index = 1 doc = document_from_string input @@ -147,366 +237,754 @@ end test 'should use specified id and reftext when registering section reference' do - input = <<-EOS -[[install,Install Procedure]] -== Install + input = <<~'EOS' + [[install,Install Procedure]] + == Install -content + content EOS doc = document_from_string input - reftext = doc.references[:ids]['install'] - refute_nil reftext - assert_equal 'Install Procedure', reftext + ref = doc.catalog[:refs]['install'] + refute_nil ref + assert_equal 'Install Procedure', ref.reftext + assert_equal 'install', (doc.resolve_id 'Install Procedure') end test 'should use specified reftext when registering section reference' do - input = <<-EOS -[reftext="Install Procedure"] -== Install + input = <<~'EOS' + [reftext="Install Procedure"] + == Install -content + content EOS doc = document_from_string input - reftext = doc.references[:ids]['_install'] - refute_nil reftext - assert_equal 'Install Procedure', reftext + ref = doc.catalog[:refs]['_install'] + refute_nil ref + assert_equal 'Install 
Procedure', ref.reftext + assert_equal '_install', (doc.resolve_id 'Install Procedure') end - test 'should not overwrite existing id entry in references table' do - input = <<-EOS -[#install] -== First Install + test 'should resolve attribute reference in title using attribute defined at location of section title' do + input = <<~'EOS' + :platform-id: linux + :platform-name: Linux + + [#install-{platform-id}] + == Install on {platform-name} -content + content -[#install] -== Second Install + :platform-id: win32 + :platform-name: Windows -content + [#install-{platform-id}] + == Install on {platform-name} + + content EOS doc = document_from_string input - reftext = doc.references[:ids]['install'] - refute_nil reftext - assert_equal 'First Install', reftext + ref = doc.catalog[:refs]['install-win32'] + refute_nil ref + assert_equal 'Install on Windows', ref.title + assert_equal 'install-win32', (doc.resolve_id 'Install on Windows') end - test 'should not overwrite existing id entry with generated reftext in references table' do - input = <<-EOS -[#install] -== First Install + test 'should substitute attributes when registering reftext for section' do + input = <<~'EOS' + :platform-name: n/a + == Overview + + :platform-name: Linux -content + [[install,install on {platform-name}]] + == Install -[#install] -content + content EOS doc = document_from_string input - reftext = doc.references[:ids]['install'] - refute_nil reftext - assert_equal 'First Install', reftext + ref = doc.catalog[:refs]['install'] + refute_nil ref + assert_equal 'install on Linux', ref.reftext + assert_equal 'install', (doc.resolve_id 'install on Linux') end - end - context "document title (level 0)" do - test "document title with multiline syntax" do - title = "My Title" - chars = "=" * title.length - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars) - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars + "\n") - end + test 'duplicate section id should not overwrite existing section id entry in references table' do + input = <<~'EOS' + [#install] + == First Install - test "document title with multiline syntax, give a char" do - title = "My Title" - chars = "=" * (title.length + 1) - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars) - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars + "\n") + content + + [#install] + == Second Install + + content + EOS + + using_memory_logger do |logger| + doc = document_from_string input + ref = doc.catalog[:refs]['install'] + refute_nil ref + assert_nil ref.reftext + assert_equal 'First Install', ref.title + assert_equal 'install', (doc.resolve_id 'First Install') + assert_message logger, :WARN, ': line 7: id assigned to section already in use: install', Hash + end end - test "document title with multiline syntax, take a char" do - title = "My Title" - chars = "=" * (title.length - 1) - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars) - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars + "\n") + test 'should warn if explicit section ID matches auto-generated section ID' do + input = <<~'EOS' + == Do Not Repeat Yourself + + content + + [#_do_not_repeat_yourself] + == Do Not Repeat Yourself + + content + EOS + + using_memory_logger do |logger| + doc = document_from_string input + ref = doc.catalog[:refs]['_do_not_repeat_yourself'] + refute_nil ref + assert_nil 
ref.reftext + assert_equal 'Do Not Repeat Yourself', ref.title + assert_equal '_do_not_repeat_yourself', (doc.resolve_id 'Do Not Repeat Yourself') + assert_message logger, :WARN, ': line 6: id assigned to section already in use: _do_not_repeat_yourself', Hash + assert_equal 2, (doc.convert.scan 'id="_do_not_repeat_yourself"').size + end end - test 'document title with multiline syntax and unicode characters' do - input = <<-EOS -AsciiDoc Writer’s Guide -======================= -Author Name + test 'duplicate block id should not overwrite existing section id entry in references table' do + input = <<~'EOS' + [#install] + == First Install + + content -preamble + [#install] + content EOS - result = render_string input - assert_xpath '//h1', result, 1 - assert_xpath '//h1[text()="AsciiDoc Writer’s Guide"]', result, 1 + using_memory_logger do |logger| + doc = document_from_string input + ref = doc.catalog[:refs]['install'] + refute_nil ref + assert_nil ref.reftext + assert_equal 'First Install', ref.title + assert_equal 'install', (doc.resolve_id 'First Install') + assert_message logger, :WARN, ': line 7: id assigned to block already in use: install', Hash + end end + end - test "not enough chars for a multiline document title" do - title = "My Title" - chars = "=" * (title.length - 2) - assert_xpath '//h1', render_string(title + "\n" + chars), 0 - assert_xpath '//h1', render_string(title + "\n" + chars + "\n"), 0 + context 'Levels' do + context 'Document Title (Level 0)' do + test "document title with multiline syntax" do + title = "My Title" + chars = "=" * title.length + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars) + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars + "\n") + end + + test "document title with multiline syntax, give a char" do + title = "My Title" + chars = "=" * (title.length + 1) + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars) + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars + "\n") + end + + test "document title with multiline syntax, take a char" do + title = "My Title" + chars = "=" * (title.length - 1) + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars) + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars + "\n") + end + + test 'document title with multiline syntax and unicode characters' do + input = <<~'EOS' + AsciiDoc Writer’s Guide + ======================= + Author Name + + preamble + EOS + + result = convert_string input + assert_xpath '//h1', result, 1 + assert_xpath '//h1[text()="AsciiDoc Writer’s Guide"]', result, 1 + end + + test "not enough chars for a multiline document title" do + title = "My Title" + chars = "=" * (title.length - 2) + using_memory_logger do |logger| + output = convert_string(title + "\n" + chars) + assert_xpath '//h1', output, 0 + refute logger.empty? + logger.clear + output = convert_string(title + "\n" + chars + "\n") + assert_xpath '//h1', output, 0 + refute logger.empty? + end + end + + test "too many chars for a multiline document title" do + title = "My Title" + chars = "=" * (title.length + 2) + using_memory_logger do |logger| + output = convert_string(title + "\n" + chars) + assert_xpath '//h1', output, 0 + refute logger.empty? + logger.clear + output = convert_string(title + "\n" + chars + "\n") + assert_xpath '//h1', output, 0 + refute logger.empty? 
+ end + end + + test "document title with multiline syntax cannot begin with a dot" do + title = ".My Title" + chars = "=" * title.length + using_memory_logger do |logger| + output = convert_string(title + "\n" + chars) + assert_xpath '//h1', output, 0 + refute logger.empty? + end + end + + test "document title with atx syntax" do + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string("= My Title") + end + + test "document title with symmetric syntax" do + assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string("= My Title =") + end + + test 'document title created from leveloffset shift defined in document' do + assert_xpath "//h1[not(@id)][text() = 'Document Title']", convert_string(%(:leveloffset: -1\n== Document Title)) + end + + test 'document title created from leveloffset shift defined in API' do + assert_xpath "//h1[not(@id)][text() = 'Document Title']", convert_string('== Document Title', attributes: { 'leveloffset' => '-1@' }) + end + + test 'should assign id on document title to body' do + input = <<~'EOS' + [[idname]] + = Document Title + + content + EOS + output = convert_string input + assert_css 'body#idname', output, 1 + end + + test 'should assign id defined using shorthand syntax on document title to body' do + input = <<~'EOS' + [#idname] + = Document Title + + content + EOS + output = convert_string input + assert_css 'body#idname', output, 1 + end + + test 'should use ID defined in block attributes instead of ID defined inline' do + input = <<~'EOS' + [#idname-block] + = Document Title [[idname-inline]] + + content + EOS + output = convert_string input + assert_css 'body#idname-block', output, 1 + end + + test 'block id above document title sets id on document' do + input = <<~'EOS' + [[reference]] + = Reference Manual + :css-signature: refguide + + preamble + EOS + doc = document_from_string input + assert_equal 'reference', doc.id + assert_equal 'refguide', doc.attr('css-signature') + output = doc.convert + assert_css 'body#reference', output, 1 + end + + test 'should register document in catalog if id is set' do + input = <<~'EOS' + [[manual,Manual]] + = Reference Manual + + preamble + EOS + doc = document_from_string input + assert_equal 'manual', doc.id + assert_equal 'Manual', doc.attributes['reftext'] + assert_equal doc, doc.catalog[:refs]['manual'] + end + + test 'should compute xreftext to document title' do + input = <<~'EOS' + [#manual] + = Reference Manual + :xrefstyle: full + + This is the <<manual>>. 
+ EOS + output = convert_string input + assert_xpath '//a[text()="Reference Manual"]', output, 1 + end + + test 'should discard style, role and options shorthand attributes defined on document title' do + input = <<~'EOS' + [style#idname.rolename%optionname] + = Document Title + + content + EOS + doc = document_from_string input + assert_empty doc.blocks[0].attributes + output = doc.convert + assert_css '#idname', output, 1 + assert_css 'body#idname', output, 1 + assert_css '.rolename', output, 1 + assert_css 'body.rolename', output, 1 + end end - test "too many chars for a multiline document title" do - title = "My Title" - chars = "=" * (title.length + 2) - assert_xpath '//h1', render_string(title + "\n" + chars), 0 - assert_xpath '//h1', render_string(title + "\n" + chars + "\n"), 0 + context 'Level 1' do + test "with multiline syntax" do + assert_xpath "//h2[@id='_my_section'][text() = 'My Section']", convert_string("My Section\n-----------") + end + + test 'should not recognize underline containing a mix of characters as setext section title' do + input = <<~'EOS' + My Section + ----^^---- + EOS + + result = convert_string_to_embedded input + assert_xpath '//h2[@id="_my_section"][text() = "My Section"]', result, 0 + assert_includes result, '----^^----' + end + + test 'should not recognize section title that does not contain alphanumeric character' do + input = <<~'EOS' + !@#$ + ---- + EOS + + using_memory_logger do |logger| + result = convert_string_to_embedded input + assert_css 'h2', result, 0 + end + end + + test 'should not recognize section title that consists of only underscores' do + input = <<~'EOS' + ____ + ---- + EOS + + using_memory_logger do |logger| + result = convert_string_to_embedded input + assert_css 'h2', result, 0 + end + end + + test 'should preprocess second line of setext section title' do + input = <<~'EOS' + Section Title + ifdef::asciidoctor[] + ------------- + endif::[] + EOS + result = convert_string_to_embedded input + assert_xpath '//h2', result, 1 + end + + test "heading title with multiline syntax cannot begin with a dot" do + title = ".My Title" + chars = "-" * title.length + using_memory_logger do |logger| + output = convert_string(title + "\n" + chars) + assert_xpath '//h2', output, 0 + refute logger.empty? 
+ end + end + + test "with atx syntax" do + assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", convert_string("== My Title") + end + + test "with atx symmetric syntax" do + assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", convert_string("== My Title ==") + end + + test "with atx non-matching symmetric syntax" do + assert_xpath "//h2[@id='_my_title'][text() = 'My Title ===']", convert_string("== My Title ===") + end + + test "with XML entity" do + assert_xpath "//h2[@id='_whats_new'][text() = \"What#{decode_char 8217}s new?\"]", convert_string("== What's new?") + end + + test "with non-word character" do + assert_xpath "//h2[@id='_whats_new'][text() = \"What’s new?\"]", convert_string("== What’s new?") + end + + test "with sequential non-word characters" do + assert_xpath "//h2[@id='_what_the_is_this'][text() = 'What the \#@$ is this?']", convert_string('== What the #@$ is this?') + end + + test "with trailing whitespace" do + assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", convert_string("== My Title ") + end + + test "with custom blank idprefix" do + assert_xpath "//h2[@id='my_title'][text() = 'My Title']", convert_string(":idprefix:\n\n== My Title ") + end + + test "with custom non-blank idprefix" do + assert_xpath "//h2[@id='ref_my_title'][text() = 'My Title']", convert_string(":idprefix: ref_\n\n== My Title ") + end + + test 'with multibyte characters' do + input = '== Asciidoctor in 中文' + output = convert_string input + assert_xpath '//h2[@id="_asciidoctor_in_中文"][text()="Asciidoctor in 中文"]', output + end + + test 'with only multibyte characters' do + input = '== 视图' + output = convert_string_to_embedded input + assert_xpath '//h2[@id="_视图"][text()="视图"]', output + end + + test 'multiline syntax with only multibyte characters' do + input = <<~'EOS' + 视图 + -- + + content + + 连接器 + --- + + content + EOS + # see https://github.com/oracle/truffleruby/issues/1563 + input = String.new input, encoding: ::Encoding::UTF_8 if RUBY_ENGINE == 'truffleruby' + output = convert_string_to_embedded input + assert_xpath '//h2[@id="_视图"][text()="视图"]', output + assert_xpath '//h2[@id="_连接器"][text()="连接器"]', output + end end - test "document title with multiline syntax cannot begin with a dot" do - title = ".My Title" - chars = "=" * title.length - assert_xpath '//h1', render_string(title + "\n" + chars), 0 + context 'Level 2' do + test "with multiline syntax" do + assert_xpath "//h3[@id='_my_section'][text() = 'My Section']", convert_string(":fragment:\nMy Section\n~~~~~~~~~~~") + end + + test "with atx line syntax" do + assert_xpath "//h3[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n=== My Title") + end end - test "document title with single-line syntax" do - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string("= My Title") + context 'Level 3' do + test "with multiline syntax" do + assert_xpath "//h4[@id='_my_section'][text() = 'My Section']", convert_string(":fragment:\nMy Section\n^^^^^^^^^^") + end + + test 'with atx line syntax' do + assert_xpath "//h4[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n==== My Title") + end end - test "document title with symmetric syntax" do - assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string("= My Title =") + context 'Level 4' do + test "with multiline syntax" do + assert_xpath "//h5[@id='_my_section'][text() = 'My Section']", convert_string(":fragment:\nMy Section\n++++++++++") + end + + test "with atx line syntax" do + assert_xpath "//h5[@id='_my_title'][text() = 
'My Title']", convert_string(":fragment:\n===== My Title") + end end - test 'should assign id on document title to body' do - input = <<-EOS -[[idname]] -= Document Title + context 'Level 5' do + test "with atx line syntax" do + assert_xpath "//h6[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n====== My Title") + end + end + end -content - EOS - output = render_string input - assert_css 'body#idname', output, 1 + context 'Substitutions' do + test 'should apply substitutions in normal order' do + input = <<~'EOS' + == {link-url}[{link-text}]{tm} + + The one and only! + EOS + + output = convert_string_to_embedded input, attributes: { + 'link-url' => 'https://acme.com', + 'link-text' => 'ACME', + 'tm' => '(TM)', + } + assert_css 'h2', output, 1 + assert_css 'h2 a[href="https://acme.com"]', output, 1 + assert_xpath %(//h2[contains(text(),"#{decode_char 8482}")]), output, 1 end + end - test 'should assign id defined using shorthand syntax on document title to body' do - input = <<-EOS -[#idname] -= Document Title + context 'Nesting' do + test 'should warn if section title is out of sequence' do + input = <<~'EOS' + = Document Title -content + == Section A + + ==== Nested Section + + content + + == Section B + + content EOS - output = render_string input - assert_css 'body#idname', output, 1 + + using_memory_logger do |logger| + result = convert_string_to_embedded input + assert_xpath '//h4[text()="Nested Section"]', result, 1 + assert_message logger, :WARN, ': line 5: section title out of sequence: expected level 2, got level 3', Hash + end end - test 'should use inline id instead of id defined in block attributes' do - input = <<-EOS -[#idname-block] -= Document Title [[idname-inline]] + test 'should warn if chapter title is out of sequence' do + input = <<~'EOS' + = Document Title + :doctype: book -content + === Not a Chapter + + content EOS - output = render_string input - assert_css 'body#idname-inline', output, 1 + + using_memory_logger do |logger| + result = convert_string_to_embedded input + assert_xpath '//h3[text()="Not a Chapter"]', result, 1 + assert_message logger, :WARN, ': line 4: section title out of sequence: expected levels 0 or 1, got level 2', Hash + end end - test 'block id above document title sets id on document' do - input = <<-EOS -[[reference]] -= Reference Manual -:css-signature: refguide + test 'should not warn if top-level section title is out of sequence when fragment attribute is set on document' do + input = <<~'EOS' + = Document Title + + === First Section -preamble + content EOS - doc = document_from_string input - assert_equal 'reference', doc.id - assert_equal 'refguide', doc.attr('css-signature') - output = doc.render - assert_css 'body#reference', output, 1 + + using_memory_logger do |logger| + convert_string_to_embedded input, attributes: { 'fragment' => '' } + assert logger.empty? + end end - test 'should discard style, role and options shorthand attributes defined on document title' do - input = <<-EOS -[style#idname.rolename%optionname] -= Document Title + test 'should warn if nested section title is out of sequence when fragment attribute is set on document' do + input = <<~'EOS' + = Document Title + + === First Section -content + ===== Nested Section EOS - doc = document_from_string input - assert doc.blocks[0].attributes.empty? 
- output = doc.convert - assert_css 'body#idname', output, 1 - assert_css '.rolename', output, 0 - end - end - context "level 1" do - test "with multiline syntax" do - assert_xpath "//h2[@id='_my_section'][text() = 'My Section']", render_string("My Section\n-----------") + using_memory_logger do |logger| + convert_string_to_embedded input, attributes: { 'fragment' => '' } + assert_message logger, :WARN, ': line 5: section title out of sequence: expected level 3, got level 4', Hash + end end + test 'should log error if subsections are found in special sections in article that do not support subsections' do + input = <<~'EOS' + = Document Title - test "heading title with multiline syntax cannot begin with a dot" do - title = ".My Title" - chars = "-" * title.length - assert_xpath '//h2', render_string(title + "\n" + chars), 0 - end + == Section - test "with single-line syntax" do - assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", render_string("== My Title") - end + === Subsection of Section - test "with single-line symmetric syntax" do - assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", render_string("== My Title ==") - end + allowed - test "with single-line non-matching symmetric syntax" do - assert_xpath "//h2[@id='_my_title'][text() = 'My Title ===']", render_string("== My Title ===") - end + [appendix] + == Appendix - test "with XML entity" do - assert_xpath "//h2[@id='_where_s_the_love'][text() = \"Where#{[8217].pack('U*')}s the love?\"]", render_string("== Where's the love?") - end + === Subsection of Appendix - test "with non-word character" do - assert_xpath "//h2[@id='_where_s_the_love'][text() = \"Where’s the love?\"]", render_string("== Where’s the love?") - end + allowed - test "with sequential non-word characters" do - assert_xpath "//h2[@id='_what_the_is_this'][text() = 'What the \#@$ is this?']", render_string('== What the #@$ is this?') - end + [glossary] + == Glossary - test "with trailing whitespace" do - assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", render_string("== My Title ") - end + === Subsection of Glossary - test "with custom blank idprefix" do - assert_xpath "//h2[@id='my_title'][text() = 'My Title']", render_string(":idprefix:\n\n== My Title ") - end + not allowed - test "with custom non-blank idprefix" do - assert_xpath "//h2[@id='ref_my_title'][text() = 'My Title']", render_string(":idprefix: ref_\n\n== My Title ") - end + [bibliography] + == Bibliography + + === Subsection of Bibliography - test 'with multibyte characters' do - input = <<-EOS -== Asciidoctor in 中文 + not allowed EOS - output = render_string input - if ::RUBY_MIN_VERSION_1_9 - assert_xpath '//h2[@id="_asciidoctor_in_中文"][text()="Asciidoctor in 中文"]', output - else - assert_xpath '//h2[@id="_asciidoctor_in"][text()="Asciidoctor in 中文"]', output + + using_memory_logger do |logger| + convert_string_to_embedded input + assert_messages logger, [ + [:ERROR, ': line 19: glossary sections do not support nested sections', Hash], + [:ERROR, ': line 26: bibliography sections do not support nested sections', Hash], + ] end end - test 'with only multibyte characters' do - input = <<-EOS -== 视图 - EOS - output = render_embedded_string input - assert_xpath '//h2[@id="_视图"][text()="视图"]', output - end if ::RUBY_MIN_VERSION_1_9 + test 'should log error if subsections are found in special sections in book that do not support subsections' do + input = <<~'EOS' + = Document Title + :doctype: book - test 'multiline syntax with only multibyte characters' do - input = <<-EOS -视图 --- + 
[preface] + = Preface -content + === Subsection of Preface -连接器 ---- + allowed -content - EOS - output = render_embedded_string input - assert_xpath '//h2[@id="_视图"][text()="视图"]', output - assert_xpath '//h2[@id="_连接器"][text()="连接器"]', output - end if ::RUBY_MIN_VERSION_1_9 - end + [colophon] + = Colophon - context "level 2" do - test "with multiline syntax" do - assert_xpath "//h3[@id='_my_section'][text() = 'My Section']", render_string(":fragment:\nMy Section\n~~~~~~~~~~~") - end + === Subsection of Colophon - test "with single line syntax" do - assert_xpath "//h3[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n=== My Title") - end - end + not allowed - context "level 3" do - test "with multiline syntax" do - assert_xpath "//h4[@id='_my_section'][text() = 'My Section']", render_string(":fragment:\nMy Section\n^^^^^^^^^^") - end + [dedication] + = Dedication - test "with single line syntax" do - assert_xpath "//h4[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n==== My Title") - end - end + === Subsection of Dedication - context "level 4" do - test "with multiline syntax" do - assert_xpath "//h5[@id='_my_section'][text() = 'My Section']", render_string(":fragment:\nMy Section\n++++++++++") - end + not allowed - test "with single line syntax" do - assert_xpath "//h5[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n===== My Title") - end - end + = Part 1 + + [abstract] + == Abstract + + === Subsection of Abstract + + allowed + + == Chapter 1 + + === Subsection of Chapter + + allowed + + [appendix] + = Appendix + + === Subsection of Appendix + + allowed + + [glossary] + = Glossary + + === Subsection of Glossary + + not allowed + + [bibliography] + = Bibliography + + === Subsection of Bibliography - context "level 5" do - test "with single line syntax" do - assert_xpath "//h6[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n====== My Title") + not allowed + EOS + + using_memory_logger do |logger| + convert_string_to_embedded input + assert_messages logger, [ + [:ERROR, ': line 14: colophon sections do not support nested sections', Hash], + [:ERROR, ': line 21: dedication sections do not support nested sections', Hash], + [:ERROR, ': line 50: glossary sections do not support nested sections', Hash], + [:ERROR, ': line 57: bibliography sections do not support nested sections', Hash] + ] + end end end context 'Markdown-style headings' do - test 'single-line document title with leading marker' do - input = <<-EOS -# Document Title - EOS - output = render_string input + test 'atx document title with leading marker' do + input = '# Document Title' + output = convert_string input assert_xpath "//h1[not(@id)][text() = 'Document Title']", output, 1 end - test 'single-line document title with symmetric markers' do - input = <<-EOS -# Document Title # - EOS - output = render_string input + test 'atx document title with symmetric markers' do + input = '# Document Title #' + output = convert_string input assert_xpath "//h1[not(@id)][text() = 'Document Title']", output, 1 end - test 'single-line section title with leading marker' do - input = <<-EOS -## Section One + test 'atx section title with leading marker' do + input = <<~'EOS' + ## Section One -blah blah + blah blah EOS - output = render_string input + output = convert_string input assert_xpath "//h2[@id='_section_one'][text() = 'Section One']", output, 1 end - test 'single-line section title with symmetric markers' do - input = <<-EOS -## Section One ## + test 'atx section 
title with symmetric markers' do + input = <<~'EOS' + ## Section One ## -blah blah + blah blah EOS - output = render_string input + output = convert_string input assert_xpath "//h2[@id='_section_one'][text() = 'Section One']", output, 1 end + + test 'should not match atx syntax with mixed markers' do + input = '=#= My Title' + output = convert_string_to_embedded input + assert_xpath "//h3[@id='_my_title'][text() = 'My Title']", output, 0 + assert_includes output, '

    =#= My Title

    ' + end end - context 'Floating Title' do - test 'should create floating title if style is float' do - input = <<-EOS -[float] -= Independent Heading! + context 'Discrete Heading' do + test 'should create discrete heading instead of section if style is float' do + input = <<~'EOS' + [float] + = Independent Heading! -not in section + not in section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/h1[@id="_independent_heading"]', output, 1 assert_xpath '/h1[@class="float"]', output, 1 assert_xpath %(/h1[@class="float"][text()="Independent Heading!"]), output, 1 @@ -515,15 +993,15 @@ assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end - test 'should create floating title if style is discrete' do - input = <<-EOS -[discrete] -=== Independent Heading! + test 'should create discrete heading instead of section if style is discrete' do + input = <<~'EOS' + [discrete] + === Independent Heading! -not in section + not in section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/h3', output, 1 assert_xpath '/h3[@id="_independent_heading"]', output, 1 assert_xpath '/h3[@class="discrete"]', output, 1 @@ -533,15 +1011,29 @@ assert_xpath '/h3/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end - test 'should create floating title if style is float with shorthand role and id' do - input = <<-EOS -[float.independent#first] -= Independent Heading! + test 'should generate id for discrete heading from converted title' do + input = <<~'EOS' + [discrete] + === {sp}Heading{sp} -not in section + not in section EOS - output = render_embedded_string input + output = convert_string_to_embedded input + assert_xpath '/h3', output, 1 + assert_xpath '/h3[@class="discrete"][@id="_heading"]', output, 1 + assert_xpath '/h3[@class="discrete"][@id="_heading"][text()=" Heading "]', output, 1 + end + + test 'should create discrete heading if style is float with shorthand role and id' do + input = <<~'EOS' + [float.independent#first] + = Independent Heading! + + not in section + EOS + + output = convert_string_to_embedded input assert_xpath '/h1[@id="first"]', output, 1 assert_xpath '/h1[@class="float independent"]', output, 1 assert_xpath %(/h1[@class="float independent"][text()="Independent Heading!"]), output, 1 @@ -550,15 +1042,15 @@ assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end - test 'should create floating title if style is discrete with shorthand role and id' do - input = <<-EOS -[discrete.independent#first] -= Independent Heading! + test 'should create discrete heading if style is discrete with shorthand role and id' do + input = <<~'EOS' + [discrete.independent#first] + = Independent Heading! -not in section + not in section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/h1[@id="first"]', output, 1 assert_xpath '/h1[@class="discrete independent"]', output, 1 assert_xpath %(/h1[@class="discrete independent"][text()="Independent Heading!"]), output, 1 @@ -567,191 +1059,226 @@ assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end - test 'floating title should be a block with context floating_title' do - input = <<-EOS -[float] -=== Independent Heading! 
+ test 'discrete heading should be a block with context floating_title' do + input = <<~'EOS' + [float] + === Independent Heading! -not in section + not in section EOS doc = document_from_string input - floatingtitle = doc.blocks.first - assert floatingtitle.is_a?(Asciidoctor::Block) - assert floatingtitle.context != :section - assert_equal :floating_title, floatingtitle.context - assert_equal '_independent_heading', floatingtitle.id - assert doc.references[:ids].has_key?('_independent_heading') - end - - test 'can assign explicit id to floating title' do - input = <<-EOS -[[unchained]] -[float] -=== Independent Heading! + heading = doc.blocks.first + assert_kind_of Asciidoctor::Block, heading + assert_equal :floating_title, heading.context + assert_equal '_independent_heading', heading.id + assert doc.catalog[:refs].key? '_independent_heading' + end + + test 'should preprocess second line of setext discrete heading' do + input = <<~'EOS' + [discrete] + Heading Title + ifdef::asciidoctor[] + ------------- + endif::[] + EOS + result = convert_string_to_embedded input + assert_xpath '//h2', result, 1 + end + + test 'can assign explicit id to discrete heading' do + input = <<~'EOS' + [[unchained]] + [float] + === Independent Heading! -not in section + not in section EOS doc = document_from_string input - floating_title = doc.blocks.first - assert_equal 'unchained', floating_title.id - assert doc.references[:ids].has_key?('unchained') + heading = doc.blocks.first + assert_equal 'unchained', heading.id + assert doc.catalog[:refs].key? 'unchained' end - test 'should not include floating title in toc' do - input = <<-EOS -:toc: + test 'should not include discrete heading in toc' do + input = <<~'EOS' + :toc: -== Section One + == Section One -[float] -=== Miss Independent + [float] + === Miss Independent -== Section Two + == Section Two EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="toc"]', output, 1 assert_xpath %(//*[@id="toc"]//a[contains(text(), "Section ")]), output, 2 assert_xpath %(//*[@id="toc"]//a[text()="Miss Independent"]), output, 0 end - test 'should not set id on floating title if sectids attribute is unset' do - input = <<-EOS -[float] -=== Independent Heading! + test 'should not set id on discrete heading if sectids attribute is unset' do + input = <<~'EOS' + [float] + === Independent Heading! -not in section + not in section EOS - output = render_embedded_string input, :attributes => {'sectids' => nil} + output = convert_string_to_embedded input, attributes: { 'sectids' => nil } assert_xpath '/h3', output, 1 assert_xpath '/h3[@id="_independent_heading"]', output, 0 assert_xpath '/h3[@class="float"]', output, 1 end - test 'should use explicit id for floating title if specified' do - input = <<-EOS -[[free]] -[float] -== Independent Heading! + test 'should use explicit id for discrete heading if specified' do + input = <<~'EOS' + [[free]] + [float] + == Independent Heading! -not in section + not in section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/h2', output, 1 assert_xpath '/h2[@id="free"]', output, 1 assert_xpath '/h2[@class="float"]', output, 1 end - test 'should add role to class attribute on floating title' do - input = <<-EOS -[float, role="isolated"] -== Independent Heading! + test 'should add role to class attribute on discrete heading' do + input = <<~'EOS' + [float, role="isolated"] + == Independent Heading! 
-not in section + not in section EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/h2', output, 1 assert_xpath '/h2[@id="_independent_heading"]', output, 1 assert_xpath '/h2[@class="float isolated"]', output, 1 end + test 'should ignore title attribute on discrete heading' do + input = <<~'EOS' + [discrete,title="Captured!"] + == Independent Heading! + + not in section + EOS + + doc = document_from_string input + heading = doc.blocks[0] + assert_equal 'Independent Heading!', heading.title + refute heading.attributes.key? 'title' + end + test 'should use specified id and reftext when registering discrete section reference' do - input = <<-EOS -[[install,Install Procedure]] -[discrete] -== Install + input = <<~'EOS' + [[install,Install Procedure]] + [discrete] + == Install -content + content EOS doc = document_from_string input - reftext = doc.references[:ids]['install'] - refute_nil reftext - assert_equal 'Install Procedure', reftext + ref = doc.catalog[:refs]['install'] + refute_nil ref + assert_equal 'Install Procedure', ref.reftext + assert_equal 'install', (doc.resolve_id 'Install Procedure') end test 'should use specified reftext when registering discrete section reference' do - input = <<-EOS -[reftext="Install Procedure"] -[discrete] -== Install + input = <<~'EOS' + [reftext="Install Procedure"] + [discrete] + == Install -content + content EOS doc = document_from_string input - reftext = doc.references[:ids]['_install'] - refute_nil reftext - assert_equal 'Install Procedure', reftext + ref = doc.catalog[:refs]['_install'] + refute_nil ref + assert_equal 'Install Procedure', ref.reftext + assert_equal '_install', (doc.resolve_id 'Install Procedure') + end + + test 'should not process inline anchor in discrete heading if explicit ID is assigned' do + input = <<~'EOS' + [discrete#install] + == Install [[installation]] + + content + EOS + + block = block_from_string input + assert_equal block.id, 'install' + assert_equal 'Install ', block.title end end context 'Level offset' do test 'should print error if standalone document is included without level offset' do - input = <<-EOS -= Master Document -Doc Writer + input = <<~'EOS' + = Master Document + Doc Writer -text in master + text in master -// begin simulated include::[] -= Standalone Document -:author: Junior Writer + // begin simulated include::[] + = Standalone Document + :author: Junior Writer -text in standalone + text in standalone -// end simulated include::[] + // end simulated include::[] EOS - output = warnings = nil - redirect_streams do |out, err| - output = render_string input - warnings = err.string + using_memory_logger do |logger| + convert_string input + assert_message logger, :ERROR, ': line 7: level 0 sections can only be used when doctype is book', Hash end - - assert !warnings.empty? - assert_match(/only book doctypes can contain level 0 sections/, warnings) end test 'should add level offset to section level' do - input = <<-EOS -= Master Document -Doc Writer + input = <<~'EOS' + = Master Document + Doc Writer -Master document written by {author}. + Master document written by {author}. -:leveloffset: 1 + :leveloffset: 1 -// begin simulated include::[] -= Standalone Document -:author: Junior Writer + // begin simulated include::[] + = Standalone Document + :author: Junior Writer -Standalone document written by {author}. + Standalone document written by {author}. -== Section in Standalone + == Section in Standalone -Standalone section text. 
-// end simulated include::[] + Standalone section text. + // end simulated include::[] -:leveloffset!: + :leveloffset!: -== Section in Master + == Section in Master -Master section text. + Master section text. EOS - output = warnings = nil - redirect_streams do |out, err| - output = render_string input - warnings = err.string + output = nil + using_memory_logger do |logger| + output = convert_string input + assert logger.empty? end - assert warnings.empty? assert_match(/Master document written by Doc Writer/, output) assert_match(/Standalone document written by Junior Writer/, output) assert_xpath '//*[@class="sect1"]/h2[text() = "Standalone Document"]', output, 1 @@ -759,65 +1286,65 @@ assert_xpath '//*[@class="sect1"]/h2[text() = "Section in Master"]', output, 1 end - test 'level offset should be added to floating title' do - input = <<-EOS -= Master Document -Doc Writer + test 'level offset should be added to discrete heading' do + input = <<~'EOS' + = Master Document + Doc Writer -:leveloffset: 1 + :leveloffset: 1 -[float] -= Floating Title + [float] + = Discrete Heading EOS - output = render_string input - assert_xpath '//h2[@class="float"][text() = "Floating Title"]', output, 1 + output = convert_string input + assert_xpath '//h2[@class="float"][text() = "Discrete Heading"]', output, 1 end test 'should be able to reset level offset' do - input = <<-EOS -= Master Document -Doc Writer + input = <<~'EOS' + = Master Document + Doc Writer -Master preamble. + Master preamble. -:leveloffset: 1 + :leveloffset: 1 -= Standalone Document + = Standalone Document -Standalone preamble. + Standalone preamble. -:leveloffset!: + :leveloffset!: -== Level 1 Section + == Level 1 Section EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class = "sect1"]/h2[text() = "Standalone Document"]', output, 1 assert_xpath '//*[@class = "sect1"]/h2[text() = "Level 1 Section"]', output, 1 end test 'should add relative offset value to current leveloffset' do - input = <<-EOS -= Master Document -Doc Writer + input = <<~'EOS' + = Master Document + Doc Writer -Master preamble. + Master preamble. 
-:leveloffset: 1 + :leveloffset: 1 -= Chapter 1 + = Chapter 1 -content + content -:leveloffset: +1 + :leveloffset: +1 -= Standalone Section + = Standalone Section -content + content EOS - output = render_string input + output = convert_string input assert_xpath '//*[@class = "sect1"]/h2[text() = "Chapter 1"]', output, 1 assert_xpath '//*[@class = "sect2"]/h3[text() = "Standalone Section"]', output, 1 end @@ -825,76 +1352,86 @@ context 'Section Numbering' do test 'should create section number with one entry for level 1' do - sect1 = Asciidoctor::Section.new + doc = empty_document + sect1 = Asciidoctor::Section.new nil, nil, true + doc << sect1 assert_equal '1.', sect1.sectnum end test 'should create section number with two entries for level 2' do - sect1 = Asciidoctor::Section.new - sect1_1 = Asciidoctor::Section.new(sect1) + doc = empty_document + sect1 = Asciidoctor::Section.new nil, nil, true + doc << sect1 + sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 assert_equal '1.1.', sect1_1.sectnum end test 'should create section number with three entries for level 3' do - sect1 = Asciidoctor::Section.new - sect1_1 = Asciidoctor::Section.new(sect1) + doc = empty_document + sect1 = Asciidoctor::Section.new nil, nil, true + doc << sect1 + sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 - sect1_1_1 = Asciidoctor::Section.new(sect1_1) + sect1_1_1 = Asciidoctor::Section.new sect1_1, nil, true sect1_1 << sect1_1_1 assert_equal '1.1.1.', sect1_1_1.sectnum end test 'should create section number for second section in level' do - sect1 = Asciidoctor::Section.new - sect1_1 = Asciidoctor::Section.new(sect1) + doc = empty_document + sect1 = Asciidoctor::Section.new nil, nil, true + doc << sect1 + sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 - sect1_2 = Asciidoctor::Section.new(sect1) + sect1_2 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_2 assert_equal '1.2.', sect1_2.sectnum end test 'sectnum should use specified delimiter and append string' do - sect1 = Asciidoctor::Section.new - sect1_1 = Asciidoctor::Section.new(sect1) + doc = empty_document + sect1 = Asciidoctor::Section.new nil, nil, true + doc << sect1 + sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 - sect1_1_1 = Asciidoctor::Section.new(sect1_1) + sect1_1_1 = Asciidoctor::Section.new sect1_1, nil, true sect1_1 << sect1_1_1 assert_equal '1,1,1,', sect1_1_1.sectnum(',') assert_equal '1:1:1', sect1_1_1.sectnum(':', false) end - test 'should render section numbers when sectnums attribute is set' do - input = <<-EOS -= Title -:sectnums: + test 'should output section numbers when sectnums attribute is set' do + input = <<~'EOS' + = Title + :sectnums: -== Section_1 + == Section_1 -text + text -=== Section_1_1 + === Section_1_1 -text + text -==== Section_1_1_1 + ==== Section_1_1_1 -text + text -== Section_2 + == Section_2 -text + text -=== Section_2_1 + === Section_2_1 -text + text -=== Section_2_2 + === Section_2_2 -text + text EOS - output = render_string input + output = convert_string input assert_xpath '//h2[@id="_section_1"][starts-with(text(), "1. ")]', output, 1 assert_xpath '//h3[@id="_section_1_1"][starts-with(text(), "1.1. ")]', output, 1 assert_xpath '//h4[@id="_section_1_1_1"][starts-with(text(), "1.1.1. ")]', output, 1 @@ -903,37 +1440,37 @@ assert_xpath '//h3[@id="_section_2_2"][starts-with(text(), "2.2. 
")]', output, 1 end - test 'should render section numbers when numbered attribute is set' do - input = <<-EOS -= Title -:numbered: + test 'should output section numbers when numbered attribute is set' do + input = <<~'EOS' + = Title + :numbered: -== Section_1 + == Section_1 -text + text -=== Section_1_1 + === Section_1_1 -text + text -==== Section_1_1_1 + ==== Section_1_1_1 -text + text -== Section_2 + == Section_2 -text + text -=== Section_2_1 + === Section_2_1 -text + text -=== Section_2_2 + === Section_2_2 -text + text EOS - output = render_string input + output = convert_string input assert_xpath '//h2[@id="_section_1"][starts-with(text(), "1. ")]', output, 1 assert_xpath '//h3[@id="_section_1_1"][starts-with(text(), "1.1. ")]', output, 1 assert_xpath '//h4[@id="_section_1_1_1"][starts-with(text(), "1.1.1. ")]', output, 1 @@ -942,19 +1479,159 @@ assert_xpath '//h3[@id="_section_2_2"][starts-with(text(), "2.2. ")]', output, 1 end + test 'should not crash if child section of part is out of sequence and part numbering is disabled' do + input = <<~'EOS' + = Document Title + :doctype: book + :sectnums: + + = Part + + === Out of Sequence Section + EOS + + using_memory_logger do |logger| + output = convert_string input + assert_xpath '//h1[text()="Part"]', output, 1 + assert_xpath '//h3[text()=".1. Out of Sequence Section"]', output, 1 + end + end + + test 'should not hang if relative leveloffset attempts to make resolved section level negative' do + input = <<~'EOS' + = Document Title + :doctype: book + :leveloffset: -1 + + = Part Title + + == Chapter Title + EOS + + using_memory_logger do |logger| + output = convert_string input + assert_xpath '//h1[text()="Part Title"]', output, 1 + assert_xpath '//h1[text()="Chapter Title"]', output, 1 + end + end + + test 'should number parts when doctype is book and partnums attributes is set' do + input = <<~'EOS' + = Book Title + :doctype: book + :sectnums: + :partnums: + + = Language + + == Syntax + + content + + = Processor + + == CLI + + content + EOS + + output = convert_string input + assert_xpath '//h1[@id="_language"][text() = "I: Language"]', output, 1 + assert_xpath '//h1[@id="_processor"][text() = "II: Processor"]', output, 1 + end + + test 'should assign sequential roman numerals to book parts' do + input = <<~'EOS' + = Book Title + :doctype: book + :sectnums: + :partnums: + + = First Part + + part intro + + == First Chapter + + = Second Part + + part intro + + == Second Chapter + EOS + + doc = document_from_string input + assert_equal 'I', doc.sections[0].numeral + assert_equal '1', doc.sections[0].sections[0].numeral + assert_equal 'II', doc.sections[1].numeral + assert_equal '2', doc.sections[1].sections[0].numeral + end + + test 'should prepend value of part-signifier attribute to title of numbered part' do + input = <<~'EOS' + = Book Title + :doctype: book + :sectnums: + :partnums: + :part-signifier: Part + + = Language + + == Syntax + + content + + = Processor + + == CLI + + content + EOS + + output = convert_string input + assert_xpath '//h1[@id="_language"][text() = "Part I: Language"]', output, 1 + assert_xpath '//h1[@id="_processor"][text() = "Part II: Processor"]', output, 1 + end + + test 'should prepend value of chapter-signifier attribute to title of numbered chapter' do + input = <<~'EOS' + = Book Title + :doctype: book + :sectnums: + :partnums: + :chapter-signifier: Chapter + + = Language + + == Syntax + + content + + = Processor + + == CLI + + content + EOS + + output = convert_string input + assert_xpath 
'//h2[@id="_syntax"][text() = "Chapter 1. Syntax"]', output, 1 + assert_xpath '//h2[@id="_cli"][text() = "Chapter 2. CLI"]', output, 1 + end + test 'blocks should have level' do - input = <<-EOS -= Title + input = <<~'EOS' + = Title -preamble + preamble -== Section 1 + == Section 1 -paragraph + paragraph -=== Section 1.1 + === Section 1.1 -paragraph + paragraph EOS doc = document_from_string input assert_equal 0, doc.blocks[0].level @@ -965,30 +1642,30 @@ end test 'section numbers should not increment when numbered attribute is turned off within document' do - input = <<-EOS -= Document Title -:numbered: + input = <<~'EOS' + = Document Title + :numbered: -:numbered!: + :numbered!: -== Colophon Section + == Colophon Section -== Another Colophon Section + == Another Colophon Section -== Final Colophon Section + == Final Colophon Section -:numbered: + :numbered: -== Section One + == Section One -=== Section One Subsection + === Section One Subsection -== Section Two + == Section Two -== Section Three + == Section Three EOS - output = render_string input + output = convert_string input assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 @@ -1000,29 +1677,29 @@ end test 'section numbers can be toggled even if numbered attribute is enable via the API' do - input = <<-EOS -= Document Title + input = <<~'EOS' + = Document Title -:numbered!: + :numbered!: -== Colophon Section + == Colophon Section -== Another Colophon Section + == Another Colophon Section -== Final Colophon Section + == Final Colophon Section -:numbered: + :numbered: -== Section One + == Section One -=== Section One Subsection + === Section One Subsection -== Section Two + == Section Two -== Section Three + == Section Three EOS - output = render_string input, :attributes => {'numbered' => ''} + output = convert_string input, attributes: { 'numbered' => '' } assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 @@ -1034,29 +1711,29 @@ end test 'section numbers cannot be toggled even if numbered attribute is disabled via the API' do - input = <<-EOS -= Document Title + input = <<~'EOS' + = Document Title -:numbered!: + :numbered!: -== Colophon Section + == Colophon Section -== Another Colophon Section + == Another Colophon Section -== Final Colophon Section + == Final Colophon Section -:numbered: + :numbered: -== Section One + == Section One -=== Section One Subsection + === Section One Subsection -== Section Two + == Section Two -== Section Three + == Section Three EOS - output = render_string input, :attributes => {'numbered!' => ''} + output = convert_string input, attributes: { 'numbered!' 
=> '' } assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 @@ -1067,30 +1744,30 @@ assert_xpath '//h2[@id="_section_three"][text()="Section Three"]', output, 1 end - # NOTE AsciiDoc fails this test because it does not properly check for a None value when looking up the numbered attribute + # NOTE AsciiDoc Python fails this test because it does not properly check for a None value when looking up the numbered attribute test 'section numbers should not increment until numbered attribute is turned back on' do - input = <<-EOS -= Document Title -:numbered!: + input = <<~'EOS' + = Document Title + :numbered!: -== Colophon Section + == Colophon Section -== Another Colophon Section + == Another Colophon Section -== Final Colophon Section + == Final Colophon Section -:numbered: + :numbered: -== Section One + == Section One -=== Section One Subsection + === Section One Subsection -== Section Two + == Section Two -== Section Three + == Section Three EOS - output = render_string input + output = convert_string input assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 @@ -1102,24 +1779,24 @@ end test 'table with asciidoc content should not disable numbering of subsequent sections' do - input = <<-EOS -= Document Title -:numbered: + input = <<~'EOS' + = Document Title + :numbered: -preamble + preamble -== Section One + == Section One -|=== -a|content -|=== + |=== + a|content + |=== -== Section Two + == Section Two -content + content EOS - output = render_string input + output = convert_string input assert_xpath '//h2[@id="_section_one"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h2[@id="_section_two"]', output, 1 @@ -1127,25 +1804,25 @@ end test 'should not number parts when doctype is book' do - input = <<-EOS -= Document Title -:doctype: book -:numbered: + input = <<~'EOS' + = Document Title + :doctype: book + :numbered: -= Part 1 + = Part 1 -== Chapter 1 + == Chapter 1 -content + content -= Part 2 + = Part 2 -== Chapter 2 + == Chapter 2 -content + content EOS - output = render_string input + output = convert_string input assert_xpath '(//h1)[1][text()="Document Title"]', output, 1 assert_xpath '(//h1)[2][text()="Part 1"]', output, 1 assert_xpath '(//h1)[3][text()="Part 2"]', output, 1 @@ -1154,53 +1831,95 @@ end test 'should number chapters sequentially even when divided into parts' do - input = <<-EOS -= Document Title -:doctype: book -:numbered: + input = <<~'EOS' + = Document Title + :doctype: book + :numbered: -== Chapter 1 + == Chapter 1 -content + content -= Part 1 + = Part 1 -== Chapter 2 + == Chapter 2 -content + content -= Part 2 + = Part 2 -== Chapter 3 + == Chapter 3 -content + content -== Chapter 4 + == Chapter 4 -content + content EOS - result = render_string input + result = convert_string input (1..4).each do |num| assert_xpath %(//h2[@id="_chapter_#{num}"]), result, 1 assert_xpath %(//h2[@id="_chapter_#{num}"][text()="#{num}. 
Chapter #{num}"]), result, 1 end end + + test 'reindex_sections should correct section enumeration after sections are modified' do + input = <<~'EOS' + :sectnums: + + == First Section + + content + + == Last Section + + content + EOS + + doc = document_from_string input + second_section = Asciidoctor::Section.new doc, nil, true + doc.blocks.insert 1, second_section + doc.reindex_sections + sections = doc.sections + [0, 1, 2].each do |index| + assert_equal index, sections[index].index + assert_equal (index + 1).to_s, sections[index].numeral + assert_equal index + 1, sections[index].number + end + end + + test 'should allow sections to be renumbered using numberal property' do + input = <<~'EOS' + == Somewhere in the Middle + + == The End + EOS + + doc = document_from_string input, attributes: { 'sectnums' => '' } + doc.sections.each do |sect| + sect.numeral = sect.numeral.next + end + + output = doc.convert standalone: false + assert_xpath '//h2[text()="2. Somewhere in the Middle"]', output, 1 + assert_xpath '//h2[text()="3. The End"]', output, 1 + end end context 'Links and anchors' do test 'should include anchor if sectanchors document attribute is set' do - input = <<-EOS -== Installation + input = <<~'EOS' + == Installation -Installation section. + Installation section. -=== Linux + === Linux -Linux installation instructions. + Linux installation instructions. EOS - output = render_embedded_string input, :attributes => {'sectanchors' => ''} + output = convert_string_to_embedded input, attributes: { 'sectanchors' => '' } assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="anchor"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a/following-sibling::text()="Installation"', output, true @@ -1209,18 +1928,38 @@ assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a/following-sibling::text()="Linux"', output, true end + test 'should position after title text if sectanchors is set to after' do + input = <<~'EOS' + == Installation + + Installation section. + + === Linux + + Linux installation instructions. + EOS + + output = convert_string_to_embedded input, attributes: { 'sectanchors' => 'after' } + assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 + assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="anchor"][@href="#_installation"]', output, 1 + assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a/preceding-sibling::text()="Installation"', output, true + assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a', output, 1 + assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[@class="anchor"][@href="#_linux"]', output, 1 + assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a/preceding-sibling::text()="Linux"', output, true + end + test 'should link section if sectlinks document attribute is set' do - input = <<-EOS -== Installation + input = <<~'EOS' + == Installation -Installation section. + Installation section. -=== Linux + === Linux -Linux installation instructions. + Linux installation instructions. 
EOS - output = render_embedded_string input, :attributes => {'sectlinks' => ''} + output = convert_string_to_embedded input, attributes: { 'sectlinks' => '' } assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="link"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[text()="Installation"]', output, 1 @@ -1231,170 +1970,206 @@ end context 'Special sections' do - test 'should assign sectname and caption to appendix section' do - input = <<-EOS -[appendix] -== Attribute Options + test 'should ignore style if it matches sectN' do + input = <<~'EOS' + = Document Title + + [sect1] + == Section Level 1 + + content + + [sect2] + == Section Level 2 -Details + content EOS - output = block_from_string input - assert_equal 'appendix', output.sectname - assert_equal 'Appendix A: ', output.caption + output = convert_string input, backend: :docbook + assert_xpath '//section', output, 2 + assert_xpath '//sect1', output, 0 + assert_xpath '//sect2', output, 0 end - test 'should render appendix title prefixed with caption' do - input = <<-EOS -[appendix] -== Attribute Options + test 'should assign sectname, caption, and numeral to appendix section by default' do + input = <<~'EOS' + [appendix] + == Attribute Options -Details + Details EOS - output = render_embedded_string input - assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 + appendix = block_from_string input + assert_equal 'appendix', appendix.sectname + assert_equal 'Appendix A: ', appendix.caption + assert_equal 'A', appendix.numeral + assert_equal 'A', appendix.number + assert_equal true, appendix.numbered end - test 'should prefix appendix title by label and letter only when numbered is enabled' do - input = <<-EOS -:numbered: + test 'should prefix appendix title by numbered label even when section numbering is disabled' do + input = <<~'EOS' + [appendix] + == Attribute Options -[appendix] -== Attribute Options + Details + EOS -Details + output = convert_string_to_embedded input + assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 + end + + test 'should use style from last block attribute line above section that defines a style' do + input = <<~'EOS' + [glossary] + [appendix] + == Attribute Options + + Details EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 end + test 'setting ID using style shorthand should not clear section style' do + input = <<~'EOS' + [appendix] + [#attribute-options] + == Attribute Options + + Details + EOS + + output = convert_string_to_embedded input + assert_xpath '//h2[@id="attribute-options"][text()="Appendix A: Attribute Options"]', output, 1 + end + test 'should use custom appendix caption if specified' do - input = <<-EOS -:appendix-caption: App + input = <<~'EOS' + :appendix-caption: App -[appendix] -== Attribute Options + [appendix] + == Attribute Options -Details + Details EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//h2[text()="App A: Attribute Options"]', output, 1 end - test 'should only assign letter to appendix when numbered is enabled and appendix caption is empty' do - input = <<-EOS -:numbered: -:appendix-caption: + test 'should only assign letter to appendix when numbered is enabled and appendix caption is not set' do + input = <<~'EOS' + :numbered: + 
:!appendix-caption: -[appendix] -== Attribute Options + [appendix] + == Attribute Options -Details + Details EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//h2[text()="A. Attribute Options"]', output, 1 end test 'should increment appendix number for each appendix section' do - input = <<-EOS -[appendix] -== Attribute Options + input = <<~'EOS' + [appendix] + == Attribute Options -Details + Details -[appendix] -== Migration + [appendix] + == Migration -Details + Details EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix B: Migration"]', output, 1 end test 'should continue numbering after appendix' do - input = <<-EOS -:numbered: + input = <<~'EOS' + :numbered: -== First Section + == First Section -content + content -[appendix] -== Attribute Options + [appendix] + == Attribute Options -content + content -== Migration + == Migration -content + content EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="1. First Section"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="2. Migration"]', output, 1 end test 'should number appendix subsections using appendix letter' do - input = <<-EOS -:numbered: + input = <<~'EOS' + :numbered: -[appendix] -== Attribute Options + [appendix] + == Attribute Options -Details + Details -=== Optional Attributes + === Optional Attributes -Details + Details EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h3)[1][text()="A.1. Optional Attributes"]', output, 1 end test 'should not number level 4 section by default' do - input = <<-EOS -:numbered: + input = <<~'EOS' + :numbered: -== Level_1 + == Level_1 -=== Level_2 + === Level_2 -==== Level_3 + ==== Level_3 -===== Level_4 + ===== Level_4 -text + text EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//h5', output, 1 assert_xpath '//h5[text()="Level_4"]', output, 1 end test 'should only number levels up to value defined by sectnumlevels attribute' do - input = <<-EOS -:numbered: -:sectnumlevels: 2 + input = <<~'EOS' + :numbered: + :sectnumlevels: 2 -== Level_1 + == Level_1 -=== Level_2 + === Level_2 -==== Level_3 + ==== Level_3 -===== Level_4 + ===== Level_4 -text + text EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//h2', output, 1 assert_xpath '//h2[text()="1. Level_1"]', output, 1 assert_xpath '//h3', output, 1 @@ -1406,34 +2181,34 @@ end test 'should not number sections or subsections in regions where numbered is off' do - input = <<-EOS -:numbered: + input = <<~'EOS' + :numbered: -== Section One + == Section One -:numbered!: + :numbered!: -[appendix] -== Attribute Options + [appendix] + == Attribute Options -Details + Details -[appendix] -== Migration + [appendix] + == Migration -Details + Details -=== Gotchas + === Gotchas -Details + Details -[glossary] -== Glossary + [glossary] + == Glossary -Terms + Terms EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="1. 
Section One"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="Appendix B: Migration"]', output, 1 @@ -1442,35 +2217,35 @@ end test 'should not number sections or subsections in toc in regions where numbered is off' do - input = <<-EOS -:numbered: -:toc: + input = <<~'EOS' + :numbered: + :toc: -== Section One + == Section One -:numbered!: + :numbered!: -[appendix] -== Attribute Options + [appendix] + == Attribute Options -Details + Details -[appendix] -== Migration + [appendix] + == Migration -Details + Details -=== Gotchas + === Gotchas -Details + Details -[glossary] -== Glossary + [glossary] + == Glossary -Terms + Terms EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 @@ -1479,404 +2254,529 @@ end test 'should only number sections in toc up to value defined by sectnumlevels attribute' do - input = <<-EOS -:numbered: -:toc: -:sectnumlevels: 2 -:toclevels: 3 + input = <<~'EOS' + :numbered: + :toc: + :sectnumlevels: 2 + :toclevels: 3 -== Level 1 + == Level 1 -=== Level 2 + === Level 2 -==== Level 3 + ==== Level 3 EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="toc"]//a[@href="#_level_1"][text()="1. Level 1"]', output, 1 assert_xpath '//*[@id="toc"]//a[@href="#_level_2"][text()="1.1. Level 2"]', output, 1 assert_xpath '//*[@id="toc"]//a[@href="#_level_3"][text()="Level 3"]', output, 1 end - # reenable once we have :specialnumbered!: implemented -=begin - test 'should not number special sections or subsections' do - input = <<-EOS -:numbered: -:specialnumbered!: + test 'should not number special sections or their subsections by default except for appendices' do + input = <<~'EOS' + :doctype: book + :sectnums: -== Section One + [preface] + == Preface -[appendix] -== Attribute Options + === Preface Subsection -Details + content -[appendix] -== Migration + == Section One -Details + content -=== Gotchas + [appendix] + == Attribute Options -Details + Details -[glossary] -== Glossary + [appendix] + == Migration -Terms + Details + + === Gotchas + + Details + + [glossary] + == Glossary + + Terms EOS - output = render_embedded_string input - assert_xpath '(//h2)[1][text()="1. Section One"]', output, 1 - assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 - assert_xpath '(//h2)[3][text()="Appendix B: Migration"]', output, 1 - assert_xpath '(//h3)[1][text()="Gotchas"]', output, 1 - assert_xpath '(//h2)[4][text()="Glossary"]', output, 1 + output = convert_string_to_embedded input + assert_xpath '(//h2)[1][text()="Preface"]', output, 1 + assert_xpath '(//h3)[1][text()="Preface Subsection"]', output, 1 + assert_xpath '(//h2)[2][text()="1. Section One"]', output, 1 + assert_xpath '(//h2)[3][text()="Appendix A: Attribute Options"]', output, 1 + assert_xpath '(//h2)[4][text()="Appendix B: Migration"]', output, 1 + assert_xpath '(//h3)[2][text()="B.1. 
Gotchas"]', output, 1 + assert_xpath '(//h2)[5][text()="Glossary"]', output, 1 end - test 'should not number special sections or subsections in toc' do - input = <<-EOS -:numbered: -:specialnumbered!: -:toc: + test 'should not number special sections or their subsections in toc by default except for appendices' do + input = <<~'EOS' + :doctype: book + :sectnums: + :toc: + + [preface] + == Preface + + === Preface Subsection -== Section One + content -[appendix] -== Attribute Options + == Section One -Details + content -[appendix] -== Migration + [appendix] + == Attribute Options -Details + Details -=== Gotchas + [appendix] + == Migration -Details + Details -[glossary] -== Glossary + === Gotchas -Terms + Details + + [glossary] + == Glossary + + Terms EOS - output = render_string input + output = convert_string input + assert_xpath '//*[@id="toc"]/ul//li/a[text()="Preface"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="Preface Subsection"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 - assert_xpath '//*[@id="toc"]/ul//li/a[text()="Gotchas"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="B.1. Gotchas"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Glossary"]', output, 1 end -=end - test 'level 0 special sections in multipart book should be rendered as level 1' do - input = <<-EOS -= Multipart Book -Doc Writer -:doctype: book + test 'should number special sections and their subsections when sectnums is all' do + input = <<~'EOS' + :doctype: book + :sectnums: all + + [preface] + == Preface + + === Preface Subsection + + content + + == Section One + + content + + [appendix] + == Attribute Options + + Details + + [appendix] + == Migration + + Details + + === Gotchas + + Details + + [glossary] + == Glossary + + Terms + EOS + + output = convert_string_to_embedded input + assert_xpath '(//h2)[1][text()="1. Preface"]', output, 1 + assert_xpath '(//h3)[1][text()="1.1. Preface Subsection"]', output, 1 + assert_xpath '(//h2)[2][text()="2. Section One"]', output, 1 + assert_xpath '(//h2)[3][text()="Appendix A: Attribute Options"]', output, 1 + assert_xpath '(//h2)[4][text()="Appendix B: Migration"]', output, 1 + assert_xpath '(//h3)[2][text()="B.1. Gotchas"]', output, 1 + assert_xpath '(//h2)[5][text()="3. Glossary"]', output, 1 + end + + test 'should number special sections and their subsections in toc when sectnums is all' do + input = <<~'EOS' + :doctype: book + :sectnums: all + :toc: + + [preface] + == Preface + + === Preface Subsection + + content + + == Section One + + content + + [appendix] + == Attribute Options + + Details + + [appendix] + == Migration + + Details + + === Gotchas + + Details + + [glossary] + == Glossary + + Terms + EOS + + output = convert_string input + assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Preface"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="1.1. Preface Subsection"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="2. Section One"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="B.1. Gotchas"]', output, 1 + assert_xpath '//*[@id="toc"]/ul//li/a[text()="3. 
Glossary"]', output, 1 + end + + test 'level 0 special sections in multipart book should be coerced to level 1' do + input = <<~'EOS' + = Multipart Book + Doc Writer + :doctype: book -[preface] -= Preface + [preface] + = Preface -Preface text + Preface text -[appendix] -= Appendix + [appendix] + = Appendix -Appendix text + Appendix text EOS - output = render_string input + output = convert_string input assert_xpath '//h2[@id = "_preface"]', output, 1 assert_xpath '//h2[@id = "_appendix"]', output, 1 end - test 'should output docbook elements that coorespond to special sections in book doctype' do - input = <<-EOS -= Multipart Book -:doctype: book -:idprefix: + test 'should output docbook elements that correspond to special sections in book doctype' do + input = <<~'EOS' + = Multipart Book + :doctype: book + :idprefix: -[abstract] -= Abstract Title + [abstract] + = Abstract Title -Normal chapter (no abstract in book) + Normal chapter (no abstract in book) -[dedication] -= Dedication Title + [dedication] + = Dedication Title -Dedication content + Dedication content -[preface] -= Preface Title + [preface] + = Preface Title -Preface content + Preface content -=== Preface sub-section + === Preface sub-section -Preface subsection content + Preface subsection content -= Part 1 + = Part 1 -[partintro] -.Part intro title -Part intro content + [partintro] + .Part intro title + Part intro content -== Chapter 1 + == Chapter 1 -blah blah + blah blah -== Chapter 2 + == Chapter 2 -blah blah + blah blah -= Part 2 + = Part 2 -[partintro] -blah blah + [partintro] + blah blah -== Chapter 3 + == Chapter 3 -blah blah + blah blah -== Chapter 4 + == Chapter 4 -blah blah + blah blah -[appendix] -= Appendix Title + [appendix] + = Appendix Title -Appendix content + Appendix content -=== Appendix sub-section + === Appendix sub-section -Appendix sub-section content + Appendix sub-section content -[bibliography] -= Bibliography Title + [bibliography] + = Bibliography Title -Bibliography content + Bibliography content -[glossary] -= Glossary Title + [glossary] + = Glossary Title -Glossary content + Glossary content -[colophon] -= Colophon Title + [colophon] + = Colophon Title -Colophon content + Colophon content -[index] -= Index Title + [index] + = Index Title EOS - output = render_embedded_string input, :backend => 'docbook45' - assert_xpath '/chapter[@id="abstract_title"]', output, 1 - assert_xpath '/chapter[@id="abstract_title"]/title[text()="Abstract Title"]', output, 1 - assert_xpath '/chapter/following-sibling::dedication[@id="dedication_title"]', output, 1 - assert_xpath '/chapter/following-sibling::dedication[@id="dedication_title"]/title[text()="Dedication Title"]', output, 1 - assert_xpath '/dedication/following-sibling::preface[@id="preface_title"]', output, 1 - assert_xpath '/dedication/following-sibling::preface[@id="preface_title"]/title[text()="Preface Title"]', output, 1 - assert_xpath '/preface/section[@id="preface_sub_section"]', output, 1 - assert_xpath '/preface/section[@id="preface_sub_section"]/title[text()="Preface sub-section"]', output, 1 - assert_xpath '/preface/following-sibling::part[@id="part_1"]', output, 1 - assert_xpath '/preface/following-sibling::part[@id="part_1"]/title[text()="Part 1"]', output, 1 - assert_xpath '/part[@id="part_1"]/partintro', output, 1 - assert_xpath '/part[@id="part_1"]/partintro/title[text()="Part intro title"]', output, 1 - assert_xpath '/part[@id="part_1"]/partintro/following-sibling::chapter[@id="chapter_1"]', output, 1 - assert_xpath 
'/part[@id="part_1"]/partintro/following-sibling::chapter[@id="chapter_1"]/title[text()="Chapter 1"]', output, 1 - assert_xpath '(/part)[2]/following-sibling::appendix[@id="appendix_title"]', output, 1 - assert_xpath '(/part)[2]/following-sibling::appendix[@id="appendix_title"]/title[text()="Appendix Title"]', output, 1 - assert_xpath '/appendix/section[@id="appendix_sub_section"]', output, 1 - assert_xpath '/appendix/section[@id="appendix_sub_section"]/title[text()="Appendix sub-section"]', output, 1 - assert_xpath '/appendix/following-sibling::bibliography[@id="bibliography_title"]', output, 1 - assert_xpath '/appendix/following-sibling::bibliography[@id="bibliography_title"]/title[text()="Bibliography Title"]', output, 1 - assert_xpath '/bibliography/following-sibling::glossary[@id="glossary_title"]', output, 1 - assert_xpath '/bibliography/following-sibling::glossary[@id="glossary_title"]/title[text()="Glossary Title"]', output, 1 - assert_xpath '/glossary/following-sibling::colophon[@id="colophon_title"]', output, 1 - assert_xpath '/glossary/following-sibling::colophon[@id="colophon_title"]/title[text()="Colophon Title"]', output, 1 - assert_xpath '/colophon/following-sibling::index[@id="index_title"]', output, 1 - assert_xpath '/colophon/following-sibling::index[@id="index_title"]/title[text()="Index Title"]', output, 1 + output = convert_string input, backend: 'docbook' + assert_xpath '/book/chapter[@xml:id="abstract_title"]', output, 1 + assert_xpath '/book/chapter[@xml:id="abstract_title"]/title[text()="Abstract Title"]', output, 1 + assert_xpath '/book/chapter/following-sibling::dedication[@xml:id="dedication_title"]', output, 1 + assert_xpath '/book/chapter/following-sibling::dedication[@xml:id="dedication_title"]/title[text()="Dedication Title"]', output, 1 + assert_xpath '/book/dedication/following-sibling::preface[@xml:id="preface_title"]', output, 1 + assert_xpath '/book/dedication/following-sibling::preface[@xml:id="preface_title"]/title[text()="Preface Title"]', output, 1 + assert_xpath '/book/preface/section[@xml:id="preface_sub_section"]', output, 1 + assert_xpath '/book/preface/section[@xml:id="preface_sub_section"]/title[text()="Preface sub-section"]', output, 1 + assert_xpath '/book/preface/following-sibling::part[@xml:id="part_1"]', output, 1 + assert_xpath '/book/preface/following-sibling::part[@xml:id="part_1"]/title[text()="Part 1"]', output, 1 + assert_xpath '/book/part[@xml:id="part_1"]/partintro', output, 1 + assert_xpath '/book/part[@xml:id="part_1"]/partintro/title[text()="Part intro title"]', output, 1 + assert_xpath '/book/part[@xml:id="part_1"]/partintro/following-sibling::chapter[@xml:id="chapter_1"]', output, 1 + assert_xpath '/book/part[@xml:id="part_1"]/partintro/following-sibling::chapter[@xml:id="chapter_1"]/title[text()="Chapter 1"]', output, 1 + assert_xpath '(/book/part)[2]/following-sibling::appendix[@xml:id="appendix_title"]', output, 1 + assert_xpath '(/book/part)[2]/following-sibling::appendix[@xml:id="appendix_title"]/title[text()="Appendix Title"]', output, 1 + assert_xpath '/book/appendix/section[@xml:id="appendix_sub_section"]', output, 1 + assert_xpath '/book/appendix/section[@xml:id="appendix_sub_section"]/title[text()="Appendix sub-section"]', output, 1 + assert_xpath '/book/appendix/following-sibling::bibliography[@xml:id="bibliography_title"]', output, 1 + assert_xpath '/book/appendix/following-sibling::bibliography[@xml:id="bibliography_title"]/title[text()="Bibliography Title"]', output, 1 + assert_xpath 
'/book/bibliography/following-sibling::glossary[@xml:id="glossary_title"]', output, 1 + assert_xpath '/book/bibliography/following-sibling::glossary[@xml:id="glossary_title"]/title[text()="Glossary Title"]', output, 1 + assert_xpath '/book/glossary/following-sibling::colophon[@xml:id="colophon_title"]', output, 1 + assert_xpath '/book/glossary/following-sibling::colophon[@xml:id="colophon_title"]/title[text()="Colophon Title"]', output, 1 + assert_xpath '/book/colophon/following-sibling::index[@xml:id="index_title"]', output, 1 + assert_xpath '/book/colophon/following-sibling::index[@xml:id="index_title"]/title[text()="Index Title"]', output, 1 end test 'abstract section maps to abstract element in docbook for article doctype' do - input = <<-EOS -= Article -:idprefix: + input = <<~'EOS' + = Article + :idprefix: -[abstract] -== Abstract Title + [abstract] + == Abstract Title -Abstract content + Abstract content EOS - output = render_embedded_string input, :backend => 'docbook45' - assert_xpath '/abstract[@id="abstract_title"]', output, 1 - assert_xpath '/abstract[@id="abstract_title"]/title[text()="Abstract Title"]', output, 1 + output = convert_string_to_embedded input, backend: 'docbook' + assert_xpath '/abstract[@xml:id="abstract_title"]', output, 1 + assert_xpath '/abstract[@xml:id="abstract_title"]/title[text()="Abstract Title"]', output, 1 end test 'should allow a special section to be nested at arbitrary depth in DocBook output' do - input = <<-EOS -= Document Title -:doctype: book + input = <<~'EOS' + = Document Title + :doctype: book -== Glossaries + == Glossaries -[glossary] -=== Glossary A + [glossary] + === Glossary A -Glossaries are optional. -Glossaries entries are an example of a style of AsciiDoc labeled lists. + Glossaries are optional. + Glossaries entries are an example of a style of AsciiDoc description lists. -[glossary] -A glossary term:: -The corresponding definition. + [glossary] + A glossary term:: + The corresponding definition. -A second glossary term:: -The corresponding definition. + A second glossary term:: + The corresponding definition. EOS - output = render_string input, :backend => :docbook + output = convert_string input, backend: :docbook assert_xpath '//glossary', output, 1 assert_xpath '//chapter/glossary', output, 1 assert_xpath '//glossary/title[text()="Glossary A"]', output, 1 assert_xpath '//glossary/glossentry', output, 2 end + + test 'should drop title on special section in DocBook output if untitled option is set' do + input = <<~'EOS' + [dedication%untitled] + == Dedication + + content + EOS + + output = convert_string_to_embedded input, backend: :docbook + assert_xpath '/dedication', output, 1 + assert_xpath '/dedication/title', output, 0 + end end context "heading patterns in blocks" do test "should not interpret a listing block as a heading" do - input = <<-EOS -Section -------- + input = <<~'EOS' + Section + ------- ----- -code ----- + ---- + code + ---- -fin. + fin. EOS - output = render_string input + output = convert_string input assert_xpath "//h2", output, 1 end test "should not interpret an open block as a heading" do - input = <<-EOS -Section -------- + input = <<~'EOS' + Section + ------- --- -ha --- + -- + ha + -- -fin. + fin. EOS - output = render_string input + output = convert_string input assert_xpath "//h2", output, 1 end test "should not interpret an attribute list as a heading" do - input = <<-EOS -Section -======= - -preamble - -[TIP] -==== -This should be a tip, not a heading. 
-==== + input = <<~'EOS' + Section + ======= + + preamble + + [TIP] + ==== + This should be a tip, not a heading. + ==== EOS - output = render_string input + output = convert_string input assert_xpath "//*[@class='admonitionblock tip']//p[text() = 'This should be a tip, not a heading.']", output, 1 end - test "should not match a heading in a labeled list" do - input = <<-EOS -Section -------- - -term1:: -+ ----- -list = [1, 2, 3]; ----- -term2:: -== not a heading -term3:: def + test "should not match a heading in a description list" do + input = <<~'EOS' + Section + ------- + + term1:: + + + ---- + list = [1, 2, 3]; + ---- + term2:: + == not a heading + term3:: def -// + // -fin. + fin. EOS - output = render_string input + output = convert_string input assert_xpath "//h2", output, 1 assert_xpath "//dl", output, 1 end test "should not match a heading in a bulleted list" do - input = <<-EOS -Section -------- - -* first -+ ----- -list = [1, 2, 3]; ----- -+ -* second -== not a heading -* third + input = <<~'EOS' + Section + ------- + + * first + + + ---- + list = [1, 2, 3]; + ---- + + + * second + == not a heading + * third -fin. + fin. EOS - output = render_string input + output = convert_string input assert_xpath "//h2", output, 1 assert_xpath "//ul", output, 1 end test "should not match a heading in a block" do - input = <<-EOS -==== + input = <<~'EOS' + ==== -== not a heading + == not a heading -==== + ==== EOS - output = render_string input + output = convert_string input assert_xpath "//h2", output, 0 assert_xpath "//*[@class='exampleblock']//p[text() = '== not a heading']", output, 1 end end context 'Table of Contents' do - test 'should render unnumbered table of contents in header if toc attribute is set' do - input = <<-EOS -= Article -:toc: + test 'should output unnumbered table of contents in header if toc attribute is set' do + input = <<~'EOS' + = Article + :toc: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... -=== Interlude + === Interlude -While they were waiting... + While they were waiting... -== Section Three + == Section Three -That's all she wrote! + That's all she wrote! EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 @@ -1891,29 +2791,29 @@ assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end - test 'should render numbered table of contents in header if toc and numbered attributes are set' do - input = <<-EOS -= Article -:toc: -:numbered: + test 'should output numbered table of contents in header if toc and numbered attributes are set' do + input = <<~'EOS' + = Article + :toc: + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... -=== Interlude + === Interlude -While they were waiting... + While they were waiting... -== Section Three + == Section Three -That's all she wrote! + That's all she wrote! 
EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 @@ -1925,31 +2825,31 @@ assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="3. Section Three"]', output, 1 end - test 'should render a table of contents that honors numbered setting at position of section in document' do - input = <<-EOS -= Article -:toc: -:numbered: + test 'should output a table of contents that honors numbered setting at position of section in document' do + input = <<~'EOS' + = Article + :toc: + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... -=== Interlude + === Interlude -While they were waiting... + While they were waiting... -:numbered!: + :numbered!: -== Section Three + == Section Three -That's all she wrote! + That's all she wrote! EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 @@ -1960,30 +2860,30 @@ end test 'should not number parts in table of contents for book doctype when numbered attribute is set' do - input = <<-EOS -= Book -:doctype: book -:toc: -:numbered: + input = <<~'EOS' + = Book + :doctype: book + :toc: + :numbered: -= Part 1 + = Part 1 -== First Section of Part 1 + == First Section of Part 1 -blah + blah -== Second Section of Part 1 + == Second Section of Part 1 -blah + blah -= Part 2 + = Part 2 -== First Section of Part 2 + == First Section of Part 2 -blah + blah EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="toc"]', output, 1 assert_xpath '//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="toc"]/ul[@class="sectlevel0"]', output, 1 @@ -1996,157 +2896,157 @@ assert_xpath '((//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul/li)[1]/a[text()="1. First Section of Part 1"]', output, 1 end - test 'should render table of contents in header if toc2 attribute is set' do - input = <<-EOS -= Article -:toc2: -:numbered: + test 'should output table of contents in header if toc2 attribute is set' do + input = <<~'EOS' + = Article + :toc2: + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_xpath '//body[@class="article toc2 toc-left"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc attribute is set to position' do - input = <<-EOS -= Article -:toc: > -:numbered: + input = <<~'EOS' + = Article + :toc: > + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... 
+ They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc and toc-position attributes are set' do - input = <<-EOS -= Article -:toc: -:toc-position: right -:numbered: + input = <<~'EOS' + = Article + :toc: + :toc-position: right + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc2 and toc-position attribute are set' do - input = <<-EOS -= Article -:toc2: -:toc-position: right -:numbered: + input = <<~'EOS' + = Article + :toc2: + :toc-position: right + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc attribute is set to direction' do - input = <<-EOS -= Article -:toc: right -:numbered: + input = <<~'EOS' + = Article + :toc: right + :numbered: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc placement to preamble if toc attribute is set to preamble' do - input = <<-EOS -= Article -:toc: preamble + input = <<~'EOS' + = Article + :toc: preamble -Yada yada + Yada yada -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... 
EOS - output = render_string input + output = convert_string input assert_css '#preamble #toc', output, 1 assert_css '#preamble .sectionbody + #toc', output, 1 end test 'should use document attributes toc-class, toc-title and toclevels to create toc' do - input = <<-EOS -= Article -:toc: -:toc-title: Contents -:toc-class: toc2 -:toclevels: 1 + input = <<~'EOS' + = Article + :toc: + :toc-title: Contents + :toc-class: toc2 + :toclevels: 1 -== Section 1 + == Section 1 -=== Section 1.1 + === Section 1.1 -==== Section 1.1.1 + ==== Section 1.1.1 -==== Section 1.1.2 + ==== Section 1.1.2 -=== Section 1.2 + === Section 1.2 -== Section 2 + == Section 2 -Fin. + Fin. EOS - output = render_string input + output = convert_string input assert_css '#header #toc', output, 1 assert_css '#header #toc.toc2', output, 1 assert_css '#header #toc li', output, 2 @@ -2154,115 +3054,115 @@ assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Contents"]', output, 1 end - test 'should not render table of contents if toc-placement attribute is unset' do - input = <<-EOS -= Article -:toc: -:toc-placement!: + test 'should not output table of contents if toc-placement attribute is unset' do + input = <<~'EOS' + = Article + :toc: + :toc-placement!: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="toc"]', output, 0 end - test 'should render table of contents at location of toc macro' do - input = <<-EOS -= Article -:toc: -:toc-placement: macro + test 'should output table of contents at location of toc macro' do + input = <<~'EOS' + = Article + :toc: + :toc-placement: macro -Once upon a time... + Once upon a time... -toc::[] + toc::[] -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_css '#preamble #toc', output, 1 assert_css '#preamble .paragraph + #toc', output, 1 end - test 'should render table of contents at location of toc macro in embedded document' do - input = <<-EOS -= Article -:toc: -:toc-placement: macro + test 'should output table of contents at location of toc macro in embedded document' do + input = <<~'EOS' + = Article + :toc: + :toc-placement: macro -Once upon a time... + Once upon a time... -toc::[] + toc::[] -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input, :header_footer => false + output = convert_string_to_embedded input assert_css '#preamble:root #toc', output, 1 assert_css '#preamble:root .paragraph + #toc', output, 1 end - test 'should render table of contents at default location in embedded document if toc attribute is set' do - input = <<-EOS -= Article -:showtitle: -:toc: + test 'should output table of contents at default location in embedded document if toc attribute is set' do + input = <<~'EOS' + = Article + :showtitle: + :toc: -Once upon a time... + Once upon a time... -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... 
-== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input, :header_footer => false + output = convert_string_to_embedded input assert_css 'h1:root', output, 1 assert_css 'h1:root + #toc:root', output, 1 assert_css 'h1:root + #toc:root + #preamble:root', output, 1 end test 'should not activate toc macro if toc-placement is not set' do - input = <<-EOS -= Article -:toc: + input = <<~'EOS' + = Article + :toc: -Once upon a time... + Once upon a time... -toc::[] + toc::[] -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 @@ -2271,24 +3171,24 @@ end test 'should only output toc at toc macro if toc is macro' do - input = <<-EOS -= Article -:toc: macro + input = <<~'EOS' + = Article + :toc: macro -Once upon a time... + Once upon a time... -toc::[] + toc::[] -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... -== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... EOS - output = render_string input + output = convert_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 @@ -2297,34 +3197,34 @@ end test 'should use global attributes for toc-title, toc-class and toclevels for toc macro' do - input = <<-EOS -= Article -:toc: -:toc-placement: macro -:toc-title: Contents -:toc-class: contents -:toclevels: 1 + input = <<~'EOS' + = Article + :toc: + :toc-placement: macro + :toc-title: Contents + :toc-class: contents + :toclevels: 1 -Preamble. + Preamble. -toc::[] + toc::[] -== Section 1 + == Section 1 -=== Section 1.1 + === Section 1.1 -==== Section 1.1.1 + ==== Section 1.1.1 -==== Section 1.1.2 + ==== Section 1.1.2 -=== Section 1.2 + === Section 1.2 -== Section 2 + == Section 2 -Fin. + Fin. EOS - output = render_string input + output = convert_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '#preamble #toc', output, 1 @@ -2336,38 +3236,38 @@ end test 'should honor id, title, role and level attributes on toc macro' do - input = <<-EOS -= Article -:toc: -:toc-placement: macro -:toc-title: Ignored -:toc-class: ignored -:toclevels: 5 -:tocdepth: 1 + input = <<~'EOS' + = Article + :toc: + :toc-placement: macro + :toc-title: Ignored + :toc-class: ignored + :toclevels: 5 + :tocdepth: 1 -Preamble. + Preamble. -[[contents]] -[role="contents"] -.Contents -toc::[levels={tocdepth}] + [[contents]] + [role="contents"] + .Contents + toc::[levels={tocdepth}] -== Section 1 + == Section 1 -=== Section 1.1 + === Section 1.1 -==== Section 1.1.1 + ==== Section 1.1.1 -==== Section 1.1.2 + ==== Section 1.1.2 -=== Section 1.2 + === Section 1.2 -== Section 2 + == Section 2 -Fin. + Fin. EOS - output = render_string input + output = convert_string input assert_css '#toc', output, 0 assert_css '#toctitle', output, 0 assert_css '#preamble #contents', output, 1 @@ -2379,27 +3279,27 @@ end test 'child toc levels should not have additional bullet at parent level in html' do - input = <<-EOS -= Article -:toc: + input = <<~'EOS' + = Article + :toc: -== Section One + == Section One -It was a dark and stormy night... + It was a dark and stormy night... 
-== Section Two + == Section Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... -=== Interlude + === Interlude -While they were waiting... + While they were waiting... -== Section Three + == Section Three -That's all she wrote! + That's all she wrote! EOS - output = render_string input + output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 @@ -2413,57 +3313,112 @@ end test 'should not display a table of contents if document has no sections' do - input_src = <<-EOS -= Document Title -:toc: + input_src = <<~'EOS' + = Document Title + :toc: -toc::[] + toc::[] -This document has no sections. + This document has no sections. -It only has content. + It only has content. EOS ['', 'left', 'preamble', 'macro'].each do |placement| input = input_src.gsub(':toc:', "\\& #{placement}") - output = render_string input + output = convert_string input assert_css '#toctitle', output, 0 end end + + test 'should drop anchors from contents of entries in table of contents' do + input = <<~'EOS' + = Document Title + :toc: + + == [[un]]Section One + + content + + == [[two]][[deux]]Section Two + + content + + == Plant Trees by https://ecosia.org[Searching] + + content + EOS + + output = convert_string_to_embedded input + assert_xpath '/*[@id="toc"]', output, 1 + toc_links = xmlnodes_at_xpath '/*[@id="toc"]//li', output + assert_equal 3, toc_links.size + assert_equal 'Section One', toc_links[0].inner_html + assert_equal 'Section Two', toc_links[1].inner_html + assert_equal 'Plant Trees by Searching', toc_links[2].inner_html + end + + test 'should not remove non-anchor tags from contents of entries in table of contents' do + input = <<~'EOS' + = Document Title + :toc: + :icons: font + + == `run` command + + content + + == icon:bug[] Issues + + content + + == https://ecosia.org[_Sustainable_ Searches] + + content + EOS + + output = convert_string_to_embedded input, safe: :safe + assert_xpath '/*[@id="toc"]', output, 1 + toc_links = xmlnodes_at_xpath '/*[@id="toc"]//li', output + assert_equal 3, toc_links.size + assert_equal 'run command', toc_links[0].inner_html + assert_equal ' Issues', toc_links[1].inner_html + assert_equal 'Sustainable Searches', toc_links[2].inner_html + end end context 'article doctype' do - test 'should create sections only in docbook backend' do - input = <<-EOS -= Article -Doc Writer + test 'should create only sections in docbook backend' do + input = <<~'EOS' + = Article + Doc Writer -== Section 1 + == Section 1 -The adventure. + The adventure. -=== Subsection One + === Subsection One -It was a dark and stormy night... + It was a dark and stormy night... -=== Subsection Two + === Subsection Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... -== Section 2 + == Section 2 -The return. + The return. -=== Subsection Three + === Subsection Three -While they were returning... + While they were returning... -=== Subsection Four + === Subsection Four -That's all she wrote! + That's all she wrote! 
EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_xpath '//part', output, 0 assert_xpath '//chapter', output, 0 assert_xpath '/article/section', output, 2 @@ -2477,37 +3432,37 @@ context 'book doctype' do test 'document title with level 0 headings' do - input = <<-EOS -= Book -Doc Writer -:doctype: book + input = <<~'EOS' + = Book + Doc Writer + :doctype: book -= Chapter One + = Chapter One -[partintro] -It was a dark and stormy night... + [partintro] + It was a dark and stormy night... -== Scene One + == Scene One -Someone's gonna get axed. + Someone's gonna get axed. -= Chapter Two + = Chapter Two -[partintro] -They couldn't believe their eyes when... + [partintro] + They couldn't believe their eyes when... -== Interlude + == Interlude -While they were waiting... + While they were waiting... -= Chapter Three + = Chapter Three -== Scene One + == Scene One -That's all she wrote! + That's all she wrote! EOS - output = render_string(input) + output = convert_string(input) assert_css 'body.book', output, 1 assert_css 'h1', output, 4 assert_css '#header h1', output, 1 @@ -2520,16 +3475,90 @@ assert_xpath '//h1[@id="_chapter_three"][text() = "Chapter Three"]', output, 1 end + test 'should print error if level 0 section comes after nested section and doctype is not book' do + input = <<~'EOS' + = Document Title + + == Level 1 Section + + === Level 2 Section + + = Level 0 Section + EOS + + using_memory_logger do |logger| + convert_string input + assert_message logger, :ERROR, ': line 7: level 0 sections can only be used when doctype is book', Hash + end + end + + test 'should add class matching role to part' do + input = <<~'EOS' + = Book Title + :doctype: book + + [.newbie] + = Part 1 + + == Chapter A + + content + + = Part 2 + + == Chapter B + + content + EOS + + result = convert_string_to_embedded input + assert_css 'h1.sect0', result, 2 + assert_css 'h1.sect0.newbie', result, 1 + assert_css 'h1.sect0.newbie#_part_1', result, 1 + end + + test 'should assign appropriate sectname for section type' do + input = <<~'EOS' + = Book Title + :doctype: book + :idprefix: + :idseparator: - + + = Part Title + + == Chapter Title + + === Section Title + + content + + [appendix] + == Appendix Title + + === Appendix Section Title + + content + EOS + + doc = document_from_string input + assert_equal 'header', doc.header.sectname + assert_equal 'part', (doc.find_by id: 'part-title')[0].sectname + assert_equal 'chapter', (doc.find_by id: 'chapter-title')[0].sectname + assert_equal 'section', (doc.find_by id: 'section-title')[0].sectname + assert_equal 'appendix', (doc.find_by id: 'appendix-title')[0].sectname + assert_equal 'section', (doc.find_by id: 'appendix-section-title')[0].sectname + end + test 'should add partintro style to child paragraph of part' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 -part intro + part intro -== Chapter 1 + == Chapter 1 EOS doc = document_from_string input @@ -2539,17 +3568,17 @@ end test 'should add partintro style to child open block of part' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 --- -part intro --- + -- + part intro + -- -== Chapter 1 + == Chapter 1 EOS doc = document_from_string input @@ -2559,17 +3588,17 @@ end test 'should wrap child paragraphs of part in partintro open block' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -= Part 
1 + = Part 1 -part intro + part intro -more part intro + more part intro -== Chapter 1 + == Chapter 1 EOS doc = document_from_string input @@ -2582,61 +3611,56 @@ end test 'should warn if part has no sections' do - input = <<-EOS -= Book -:doctype: book + input = <<~'EOS' + = Book + :doctype: book -= Part 1 + = Part 1 -[partintro] -intro + [partintro] + intro EOS - doc = warnings = nil - redirect_streams do |out, err| - doc = document_from_string input - warnings = err.string + using_memory_logger do |logger| + document_from_string input + assert_message logger, :ERROR, ': line 8: invalid part, must have at least one section (e.g., chapter, appendix, etc.)', Hash end - - refute_nil warnings - assert !warnings.empty? - assert_match(/ERROR:.*section/, warnings) end test 'should create parts and chapters in docbook backend' do - input = <<-EOS -= Book -Doc Writer -:doctype: book + input = <<~'EOS' + = Book + Doc Writer + :doctype: book -= Part 1 + = Part 1 -[partintro] -The adventure. + [partintro] + The adventure. -== Chapter One + == Chapter One -It was a dark and stormy night... + It was a dark and stormy night... -== Chapter Two + == Chapter Two -They couldn't believe their eyes when... + They couldn't believe their eyes when... -= Part 2 + = Part 2 -[partintro] -The return. + [partintro] + The return. -== Chapter Three + == Chapter Three -While they were returning... + While they were returning... -== Chapter Four + == Chapter Four -That's all she wrote! + That's all she wrote! EOS - output = render_string input, :backend => 'docbook' + output = convert_string input, backend: 'docbook' assert_xpath '//chapter/chapter', output, 0 assert_xpath '/book/part', output, 2 assert_xpath '/book/part[1]/title[text() = "Part 1"]', output, 1 @@ -2647,46 +3671,45 @@ end test 'subsections in preface and appendix should start at level 2' do - input = <<-EOS -= Multipart Book -Doc Writer -:doctype: book + input = <<~'EOS' + = Multipart Book + Doc Writer + :doctype: book -[preface] -= Preface + [preface] + = Preface -Preface content + Preface content -=== Preface subsection + === Preface subsection -Preface subsection content + Preface subsection content -= Part 1 + = Part 1 -.Part intro title -[partintro] -Part intro content + .Part intro title + [partintro] + Part intro content -== Chapter 1 + == Chapter 1 -content + content -[appendix] -= Appendix + [appendix] + = Appendix -Appendix content + Appendix content -=== Appendix subsection + === Appendix subsection -Appendix subsection content + Appendix subsection content EOS - output = warnings = nil - redirect_streams do |out, err| - output = render_string input, :backend => 'docbook' - warnings = err.string + output = nil + using_memory_logger do |logger| + output = convert_string input, backend: 'docbook' + assert logger.empty? end - assert warnings.empty? assert_xpath '/book/preface', output, 1 assert_xpath '/book/preface/section', output, 1 assert_xpath '/book/part', output, 1 diff -Nru asciidoctor-1.5.5/test/substitutions_test.rb asciidoctor-2.0.10/test/substitutions_test.rb --- asciidoctor-1.5.5/test/substitutions_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/substitutions_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,38 +1,71 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! 
- require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' # TODO # - test negatives # - test role on every quote type context 'Substitutions' do + BACKSLASH = ?\\ context 'Dispatcher' do test 'apply normal substitutions' do para = block_from_string("[blue]_http://asciidoc.org[AsciiDoc]_ & [red]*Ruby*\n§ Making +++documentation+++ together +\nsince (C) {inception_year}.") para.document.attributes['inception_year'] = '2012' - result = para.apply_normal_subs(para.lines) + result = para.apply_subs(para.source) assert_equal %{AsciiDoc & Ruby\n§ Making documentation together
<br>\nsince © 2012.}, result end + + test 'apply_subs should not modify string directly' do + input = '<html> -- the root of all web' + para = block_from_string input + para_source = para.source + result = para.apply_subs para_source + assert_equal '<html> — the root of all web', result + assert_equal input, para_source + end + + test 'should not drop trailing blank lines when performing substitutions' do + para = block_from_string %([%hardbreaks]\nthis\nis\n-> {program}) + para.lines << '' + para.lines << '' + para.document.attributes['program'] = 'Asciidoctor' + result = para.apply_subs(para.lines) + assert_equal ['this
<br>', 'is<br>', '→ Asciidoctor<br>', '<br>', ''], result + result = para.apply_subs(para.lines * "\n") + assert_equal %(this<br>\nis<br>\n→ Asciidoctor<br>\n<br>
    \n), result + end + + test 'should expand subs passed to expand_subs' do + para = block_from_string %({program}\n*bold*\n2 > 1) + para.document.attributes['program'] = 'Asciidoctor' + assert_equal [:specialcharacters], (para.expand_subs [:specialchars]) + refute para.expand_subs([:none]) + assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], (para.expand_subs [:normal]) + end + + test 'apply_subs should allow the subs argument to be nil' do + block = block_from_string %([pass]\n*raw*) + result = block.apply_subs block.source, nil + assert_equal '*raw*', result + end end context 'Quotes' do - BACKSLASH = '\\' - test 'single-line double-quoted string' do - para = block_from_string(%q{``a few quoted words''}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{``a few quoted words''}, attributes: { 'compat-mode' => '' }) assert_equal '“a few quoted words”', para.sub_quotes(para.source) para = block_from_string(%q{"`a few quoted words`"}) assert_equal '“a few quoted words”', para.sub_quotes(para.source) + + para = block_from_string(%q{"`a few quoted words`"}, backend: 'docbook') + assert_equal 'a few quoted words', para.sub_quotes(para.source) end test 'escaped single-line double-quoted string' do - para = block_from_string %(#{BACKSLASH}``a few quoted words''), :attributes => {'compat-mode' => ''} + para = block_from_string %(#{BACKSLASH}``a few quoted words''), attributes: { 'compat-mode' => '' } assert_equal %q(‘`a few quoted words’'), para.sub_quotes(para.source) - para = block_from_string %(#{BACKSLASH * 2}``a few quoted words''), :attributes => {'compat-mode' => ''} + para = block_from_string %(#{BACKSLASH * 2}``a few quoted words''), attributes: { 'compat-mode' => '' } assert_equal %q(``a few quoted words''), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}"`a few quoted words`")) @@ -43,7 +76,7 @@ end test 'multi-line double-quoted string' do - para = block_from_string(%Q{``a few\nquoted words''}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%Q{``a few\nquoted words''}, attributes: { 'compat-mode' => '' }) assert_equal "“a few\nquoted words”", para.sub_quotes(para.source) para = block_from_string(%Q{"`a few\nquoted words`"}) @@ -51,7 +84,7 @@ end test 'double-quoted string with inline single quote' do - para = block_from_string(%q{``Here's Johnny!''}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{``Here's Johnny!''}, attributes: { 'compat-mode' => '' }) assert_equal %q{“Here's Johnny!”}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here's Johnny!`"}) @@ -59,7 +92,7 @@ end test 'double-quoted string with inline backquote' do - para = block_from_string(%q{``Here`s Johnny!''}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{``Here`s Johnny!''}, attributes: { 'compat-mode' => '' }) assert_equal %q{“Here`s Johnny!”}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here`s Johnny!`"}) @@ -75,15 +108,18 @@ end test 'single-line single-quoted string' do - para = block_from_string(%q{`a few quoted words'}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{`a few quoted words'}, attributes: { 'compat-mode' => '' }) assert_equal '‘a few quoted words’', para.sub_quotes(para.source) para = block_from_string(%q{'`a few quoted words`'}) assert_equal '‘a few quoted words’', para.sub_quotes(para.source) + + para = block_from_string(%q{'`a few quoted words`'}, backend: 'docbook') + assert_equal 'a few quoted 
words', para.sub_quotes(para.source) end test 'escaped single-line single-quoted string' do - para = block_from_string(%(#{BACKSLASH}`a few quoted words'), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(#{BACKSLASH}`a few quoted words'), attributes: { 'compat-mode' => '' }) assert_equal %(`a few quoted words'), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}'`a few quoted words`')) @@ -91,7 +127,7 @@ end test 'multi-line single-quoted string' do - para = block_from_string(%Q{`a few\nquoted words'}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%Q{`a few\nquoted words'}, attributes: { 'compat-mode' => '' }) assert_equal "‘a few\nquoted words’", para.sub_quotes(para.source) para = block_from_string(%Q{'`a few\nquoted words`'}) @@ -99,7 +135,7 @@ end test 'single-quoted string with inline single quote' do - para = block_from_string(%q{`That isn't what I did.'}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{`That isn't what I did.'}, attributes: { 'compat-mode' => '' }) assert_equal %q{‘That isn't what I did.’}, para.sub_quotes(para.source) para = block_from_string(%q{'`That isn't what I did.`'}) @@ -107,7 +143,7 @@ end test 'single-quoted string with inline backquote' do - para = block_from_string(%q{`Here`s Johnny!'}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{`Here`s Johnny!'}, attributes: { 'compat-mode' => '' }) assert_equal %q{‘Here`s Johnny!’}, para.sub_quotes(para.source) para = block_from_string(%q{'`Here`s Johnny!`'}) @@ -115,7 +151,7 @@ end test 'single-line constrained marked string' do - #para = block_from_string(%q{#a few words#}, :attributes => {'compat-mode' => ''}) + #para = block_from_string(%q{#a few words#}, attributes: { 'compat-mode' => '' }) #assert_equal 'a few words', para.sub_quotes(para.source) para = block_from_string(%q{#a few words#}) @@ -128,7 +164,7 @@ end test 'multi-line constrained marked string' do - #para = block_from_string(%Q{#a few\nwords#}, :attributes => {'compat-mode' => ''}) + #para = block_from_string(%Q{#a few\nwords#}, attributes: { 'compat-mode' => '' }) #assert_equal "a few\nwords", para.sub_quotes(para.source) para = block_from_string(%Q{#a few\nwords#}) @@ -141,7 +177,7 @@ end test 'single-line unconstrained marked string' do - #para = block_from_string(%q{##--anything goes ##}, :attributes => {'compat-mode' => ''}) + #para = block_from_string(%q{##--anything goes ##}, attributes: { 'compat-mode' => '' }) #assert_equal '--anything goes ', para.sub_quotes(para.source) para = block_from_string(%q{##--anything goes ##}) @@ -154,7 +190,7 @@ end test 'multi-line unconstrained marked string' do - #para = block_from_string(%Q{##--anything\ngoes ##}, :attributes => {'compat-mode' => ''}) + #para = block_from_string(%Q{##--anything\ngoes ##}, attributes: { 'compat-mode' => '' }) #assert_equal "--anything\ngoes ", para.sub_quotes(para.source) para = block_from_string(%Q{##--anything\ngoes ##}) @@ -189,7 +225,7 @@ test 'constrained strong string containing an asterisk and multibyte word chars' do para = block_from_string(%q{*黑*眼圈*}) assert_equal '黑*眼圈', para.sub_quotes(para.source) - end if ::RUBY_MIN_VERSION_1_9 + end test 'single-line constrained quote variation emphasized string' do para = block_from_string(%q{_a few emphasized words_}) @@ -213,7 +249,7 @@ end test 'single-quoted string containing an emphasized phrase' do - para = block_from_string(%q{`I told him, 'Just go for it!''}, :attributes => {'compat-mode' => ''}) + para = 
block_from_string(%q{`I told him, 'Just go for it!''}, attributes: { 'compat-mode' => '' }) assert_equal '‘I told him, Just go for it!’', para.sub_quotes(para.source) para = block_from_string(%q{'`I told him, 'Just go for it!'`'}) @@ -221,11 +257,11 @@ end test 'escaped single-quotes inside emphasized words are restored' do - para = block_from_string(%('Here#{BACKSLASH}'s Johnny!'), :attributes => {'compat-mode' => ''}) - assert_equal %q(Here's Johnny!), para.apply_normal_subs(para.lines) + para = block_from_string(%('Here#{BACKSLASH}'s Johnny!'), attributes: { 'compat-mode' => '' }) + assert_equal %q(Here's Johnny!), para.apply_subs(para.source) para = block_from_string(%('Here#{BACKSLASH}'s Johnny!')) - assert_equal %q('Here's Johnny!'), para.apply_normal_subs(para.lines) + assert_equal %q('Here's Johnny!'), para.apply_subs(para.source) end test 'single-line constrained emphasized underline variation string' do @@ -243,67 +279,67 @@ assert_equal "a few\nemphasized words", para.sub_quotes(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string' do - para = block_from_string(%(`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced', 'compat-mode' => ''}) - assert_equal 'a few <{monospaced}> words', para.apply_normal_subs(para.lines) + para = block_from_string(%(`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) + assert_equal 'a few <{monospaced}> words', para.apply_subs(para.source) - para = block_from_string(%(`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced'}) - assert_equal 'a few <monospaced> words', para.apply_normal_subs(para.lines) + para = block_from_string(%(`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) + assert_equal 'a few <monospaced> words', para.apply_subs(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string with role' do - para = block_from_string(%([input]`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced', 'compat-mode' => ''}) - assert_equal 'a few <{monospaced}> words', para.apply_normal_subs(para.lines) + para = block_from_string(%([input]`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) + assert_equal 'a few <{monospaced}> words', para.apply_subs(para.source) - para = block_from_string(%([input]`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced'}) - assert_equal 'a few <monospaced> words', para.apply_normal_subs(para.lines) + para = block_from_string(%([input]`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) + assert_equal 'a few <monospaced> words', para.apply_subs(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string' do - para = block_from_string(%(#{BACKSLASH}`a few words`), :attributes => {'compat-mode' => ''}) - assert_equal '`a few <monospaced> words`', para.apply_normal_subs(para.lines) + para = block_from_string(%(#{BACKSLASH}`a few 
words`), attributes: { 'compat-mode' => '' }) + assert_equal '`a few <monospaced> words`', para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}`a few words`)) - assert_equal '`a few <monospaced> words`', para.apply_normal_subs(para.lines) + assert_equal '`a few <monospaced> words`', para.apply_subs(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string with role' do - para = block_from_string(%([input]#{BACKSLASH}`a few words`), :attributes => {'compat-mode' => ''}) - assert_equal '[input]`a few <monospaced> words`', para.apply_normal_subs(para.lines) + para = block_from_string(%([input]#{BACKSLASH}`a few words`), attributes: { 'compat-mode' => '' }) + assert_equal '[input]`a few <monospaced> words`', para.apply_subs(para.source) para = block_from_string(%([input]#{BACKSLASH}`a few words`)) - assert_equal '[input]`a few <monospaced> words`', para.apply_normal_subs(para.lines) + assert_equal '[input]`a few <monospaced> words`', para.apply_subs(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped role on single-line constrained monospaced string' do - para = block_from_string(%(#{BACKSLASH}[input]`a few words`), :attributes => {'compat-mode' => ''}) - assert_equal '[input]a few <monospaced> words', para.apply_normal_subs(para.lines) + para = block_from_string(%(#{BACKSLASH}[input]`a few words`), attributes: { 'compat-mode' => '' }) + assert_equal '[input]a few <monospaced> words', para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}[input]`a few words`)) - assert_equal '[input]a few <monospaced> words', para.apply_normal_subs(para.lines) + assert_equal '[input]a few <monospaced> words', para.apply_subs(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped role on escaped single-line constrained monospaced string' do - para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`), :attributes => {'compat-mode' => ''}) - assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_normal_subs(para.lines) + para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`), attributes: { 'compat-mode' => '' }) + assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`)) - assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_normal_subs(para.lines) + assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_subs(para.source) end - # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough + # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'multi-line constrained monospaced string' do - para = block_from_string(%(`a few\n<{monospaced}> words`), :attributes => {'monospaced' => 'monospaced', 'compat-mode' => ''}) - assert_equal "a few\n<{monospaced}> words", para.apply_normal_subs(para.lines) + para = block_from_string(%(`a few\n<{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' 
=> '' }) + assert_equal "a few\n<{monospaced}> words", para.apply_subs(para.source) - para = block_from_string(%(`a few\n<{monospaced}> words`), :attributes => {'monospaced' => 'monospaced'}) - assert_equal "a few\n<monospaced> words", para.apply_normal_subs(para.lines) + para = block_from_string(%(`a few\n<{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) + assert_equal "a few\n<monospaced> words", para.apply_subs(para.source) end test 'single-line unconstrained strong chars' do @@ -368,7 +404,7 @@ end test 'single-line constrained monospaced chars' do - para = block_from_string(%q{call +save()+ to persist the changes}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{call +save()+ to persist the changes}, attributes: { 'compat-mode' => '' }) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [x-]+save()+ to persist the changes}) @@ -379,7 +415,7 @@ end test 'single-line constrained monospaced chars with role' do - para = block_from_string(%q{call [method]+save()+ to persist the changes}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{call [method]+save()+ to persist the changes}, attributes: { 'compat-mode' => '' }) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [method x-]+save()+ to persist the changes}) @@ -390,7 +426,7 @@ end test 'escaped single-line constrained monospaced chars' do - para = block_from_string(%(call #{BACKSLASH}+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(call #{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call +save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}`save()` to persist the changes)) @@ -398,7 +434,7 @@ end test 'escaped single-line constrained monospaced chars with role' do - para = block_from_string(%(call [method]#{BACKSLASH}+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(call [method]#{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call [method]+save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call [method]#{BACKSLASH}`save()` to persist the changes)) @@ -406,7 +442,7 @@ end test 'escaped role on single-line constrained monospaced chars' do - para = block_from_string(%(call #{BACKSLASH}[method]+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(call #{BACKSLASH}[method]+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call [method]save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]`save()` to persist the changes)) @@ -414,7 +450,7 @@ end test 'escaped role on escaped single-line constrained monospaced chars' do - para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal %(call #{BACKSLASH}[method]+save()+ to persist the changes), para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}`save()` to persist the changes)) @@ -422,7 +458,7 @@ end test 'single-line 
unconstrained monospaced chars' do - para = block_from_string(%q{Git++Hub++}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%q{Git++Hub++}, attributes: { 'compat-mode' => '' }) assert_equal 'GitHub', para.sub_quotes(para.source) para = block_from_string(%q{Git[x-]++Hub++}) @@ -433,10 +469,10 @@ end test 'escaped single-line unconstrained monospaced chars' do - para = block_from_string(%(Git#{BACKSLASH}++Hub++), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(Git#{BACKSLASH}++Hub++), attributes: { 'compat-mode' => '' }) assert_equal 'Git+Hub+', para.sub_quotes(para.source) - para = block_from_string(%(Git#{BACKSLASH * 2}++Hub++), :attributes => {'compat-mode' => ''}) + para = block_from_string(%(Git#{BACKSLASH * 2}++Hub++), attributes: { 'compat-mode' => '' }) assert_equal 'Git++Hub++', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH}``Hub``)) @@ -444,7 +480,7 @@ end test 'multi-line unconstrained monospaced chars' do - para = block_from_string(%Q{Git++\nH\nu\nb++}, :attributes => {'compat-mode' => ''}) + para = block_from_string(%Q{Git++\nH\nu\nb++}, attributes: { 'compat-mode' => '' }) assert_equal "Git\nH\nu\nb", para.sub_quotes(para.source) para = block_from_string(%Q{Git[x-]++\nH\nu\nb++}) @@ -476,7 +512,7 @@ test 'does not confuse superscript and links with blank window shorthand' do para = block_from_string(%Q{http://localhost[Text^] on the 21^st^ and 22^nd^}) - assert_equal 'Text on the 21st and 22nd', para.content + assert_equal 'Text on the 21st and 22nd', para.content end test 'single-line subscript chars' do @@ -511,17 +547,17 @@ test 'quoted text with id shorthand' do para = block_from_string(%q{[#bond]#007#}) - assert_equal '007', para.sub_quotes(para.source) + assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand' do para = block_from_string(%q{[#bond.white.red-background]#007#}) - assert_equal '007', para.sub_quotes(para.source) + assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand using docbook backend' do - para = block_from_string(%q{[#bond.white.red-background]#007#}, :backend => 'docbook45') - assert_equal '007', para.sub_quotes(para.source) + para = block_from_string(%q{[#bond.white.red-background]#007#}, backend: 'docbook') + assert_equal '007', para.sub_quotes(para.source) end test 'should ignore attributes after comma' do @@ -529,18 +565,16 @@ assert_equal 'alert', para.sub_quotes(para.source) end - test 'should assign role attribute when shorthand style contains a role' do - para = block_from_string 'blah' - result = para.parse_quoted_text_attributes '.red#idref' - expect = {'id' => 'idref', 'role' => 'red'} - assert_equal expect, result + test 'inline passthrough with id and role set using shorthand' do + %w(#idname.rolename .rolename#idname).each do |attrlist| + para = block_from_string %([#{attrlist}]+pass+) + assert_equal 'pass', para.content + end end test 'should not assign role attribute if shorthand style has no roles' do - para = block_from_string 'blah' - result = para.parse_quoted_text_attributes '#idref' - expect = {'id' => 'idref'} - assert_equal expect, result + para = block_from_string '[#idname]*blah*' + assert_equal 'blah', para.content end end @@ -566,24 +600,45 @@ end test 'a mailto macro with text and subject should be interpreted as a mailto link' do - para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request]', :attributes => {'linkattrs' => ''}) - assert_equal %q{Doc 
Writer}, para.sub_macros(para.source) + para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request]') + assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'a mailto macro with text, subject and body should be interpreted as a mailto link' do - para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request, Please accept my pull request]', :attributes => {'linkattrs' => ''}) - assert_equal %q{Doc Writer}, para.sub_macros(para.source) + para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request, Please accept my pull request]') + assert_equal %q{Doc Writer}, para.sub_macros(para.source) + end + + test 'a mailto macro with subject and body only should use e-mail as text' do + para = block_from_string('mailto:doc.writer@asciidoc.org[,Pull request,Please accept my pull request]') + assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'should recognize inline email addresses' do - para = block_from_string('doc.writer@asciidoc.org') - assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) + %w( + doc.writer@asciidoc.org + author+website@4fs.no + john@domain.uk.co + name@somewhere.else.com + joe_bloggs@mail_server.com + joe-bloggs@mail-server.com + joe.bloggs@mail.server.com + FOO@BAR.COM + docs@writing.ninja + ).each do |input| + para = block_from_string input + assert_equal %(#{input}), (para.sub_macros para.source) + end + end + + test 'should recognize inline email address containing an ampersand' do + para = block_from_string('bert&ernie@sesamestreet.com') + assert_equal %q{bert&ernie@sesamestreet.com}, para.apply_subs(para.source) + end + + test 'should recognize inline email address surrounded by angle brackets' do para = block_from_string('<doc.writer@asciidoc.org>') - assert_equal %q{<doc.writer@asciidoc.org>}, para.apply_normal_subs(para.lines) - para = block_from_string('author+website@4fs.no') - assert_equal %q{author+website@4fs.no}, para.sub_macros(para.source) - para = block_from_string('john@domain.uk.co') - assert_equal %q{john@domain.uk.co}, para.sub_macros(para.source) + assert_equal %q{<doc.writer@asciidoc.org>}, para.apply_subs(para.source) end test 'should ignore escaped inline email address' do @@ -606,10 +661,21 @@ assert_equal %{Google\nHomepage}, para.sub_macros(para.source) end - test 'a multi-line raw url with attribute as text should be interpreted as a link with resolved attribute' do + test 'a single-line raw url with attribute as text should be interpreted as a link with resolved attribute' do para = block_from_string("http://google.com[{google_homepage}]") para.document.attributes['google_homepage'] = 'Google Homepage' - assert_equal %q{Google Homepage}, para.sub_macros(para.source) + assert_equal %q{Google Homepage}, para.sub_macros(para.sub_attributes(para.source)) + end + + test 'should not resolve an escaped attribute in link text' do + { + 'http://google.com' => "http://google.com[#{BACKSLASH}{google_homepage}]", + 'http://google.com?q=,' => "link:http://google.com?q=,[#{BACKSLASH}{google_homepage}]", + }.each do |uri, macro| + para = block_from_string macro + para.document.attributes['google_homepage'] = 'Google Homepage' + assert_equal %({google_homepage}), para.sub_macros(para.sub_attributes(para.source)) + end end test 'a single-line escaped raw url should not be interpreted as a link' do @@ -637,23 +703,30 @@ assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+</, '><') end + test 'should encode special characters in alt text of inline image' do + input
= 'A tiger\'s "roar" is < a bear\'s "growl"' + expected = 'A tiger’s "roar" is < a bear\'s "growl"' + output = (convert_inline_string %(image:tiger-roar.png[#{input}])).gsub(/>\s+</, '><') + assert_equal %(#{expected}), output + end + test 'an image macro with SVG image and text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.svg[Tiger]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an image macro with an interactive SVG image and alt text should be converted to an object element' do - para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'imagesdir' => 'images' }) + para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an image macro with an interactive SVG image, fallback and alt text should be converted to an object element' do - para = block_from_string('image:tiger.svg[Tiger,fallback=tiger.png,opts=interactive]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'imagesdir' => 'images' }) + para = block_from_string('image:tiger.svg[Tiger,fallback=tiger.png,opts=interactive]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an image macro with an inline SVG image should be converted to an svg element' do - para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'imagesdir' => 'fixtures', 'docdir' => ::File.dirname(__FILE__) }) + para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'fixtures', 'docdir' => testdir }) result = para.sub_macros(para.source).gsub(/>\s+</, '><') assert_match(/]*width="100px"[^>]*>/, result) refute_match(/]*width="500px"[^>]*>/, result) @@ -662,12 +735,12 @@ end test 'an image macro with an inline SVG image should be converted to an svg element even when data-uri is set' do - para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'data-uri' => '', 'imagesdir' => 'fixtures', 'docdir' => ::File.dirname(__FILE__) }) + para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'data-uri' => '', 'imagesdir' => 'fixtures', 'docdir' => testdir }) assert_match(/]*width="100px">/, para.sub_macros(para.source).gsub(/>\s+</, '><')) end test 'an image macro with an SVG image should not use an object element when safe mode is secure' do - para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', :attributes => { 'imagesdir' => 'images' }) + para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', attributes: { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+</, '><') end @@ -683,7 +756,7 @@ end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions in docbook' do - para = block_from_string 'image:tiger.png[Tiger, 200, 100]', :backend => 'docbook' + para = block_from_string 'image:tiger.png[Tiger, 200, 100]', backend: 'docbook' assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+</, '><') end @@ -694,6 +767,24 @@ para.sub_macros(para.source).gsub(/>\s+</, '><') end + test
'rel=noopener should be added to an image with a link that targets the _blank window' do + para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=_blank]' + assert_equal %{Tiger}, + para.sub_macros(para.source).gsub(/>\s+<') + end + + test 'rel=noopener should be added to an image with a link that targets a named window when the noopener option is set' do + para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=name,opts=noopener]' + assert_equal %{Tiger}, + para.sub_macros(para.source).gsub(/>\s+<') + end + + test 'rel=nofollow should be added to an image with a link when the nofollow option is set' do + para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,opts=nofollow]' + assert_equal %{Tiger}, + para.sub_macros(para.source).gsub(/>\s+<') + end + test 'a multi-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string(%(image:tiger.png[Another\nAwesome\nTiger, 200,\n100])) assert_equal %{Another Awesome Tiger}, @@ -708,37 +799,72 @@ test 'an inline image macro with a float attribute should be interpreted as a floating image' do para = block_from_string %(image:http://example.com/images/tiger.png[tiger, float="right"] Beware of the tigers!) - assert_equal %{tiger Beware of the tigers!}, + assert_equal %{tiger Beware of the tigers!}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should prepend value of imagesdir attribute to inline image target if target is relative path' do - para = block_from_string %(Beware of the image:tiger.png[tiger].), :attributes => {'imagesdir' => './images'} + para = block_from_string %(Beware of the image:tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not prepend value of imagesdir attribute to inline image target if target is absolute path' do - para = block_from_string %(Beware of the image:/tiger.png[tiger].), :attributes => {'imagesdir' => './images'} + para = block_from_string %(Beware of the image:/tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not prepend value of imagesdir attribute to inline image target if target is url' do - para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].), :attributes => {'imagesdir' => './images'} + para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end - test 'a block image macro should not be detected within paragraph text' do + test 'should match an inline image macro if target contains a space character' do + para = block_from_string(%(Beware of the image:big cats.png[] around here.)) + assert_equal %(Beware of the big cats around here.), + para.sub_macros(para.source).gsub(/>\s+<') + end + + test 'should not match an inline image macro if target contains a newline character' do + para = block_from_string(%(Fear not. 
There are no image:big\ncats.png[] around here.)) + result = para.sub_macros(para.source) + refute_includes result, ' '', 'iconsdir' => 'fixtures', 'docdir' => testdir }, safe: :server, catalog_assets: true + assert 1, sect.document.catalog[:images].size + assert_equal 'fixtures/dot.gif', sect.document.catalog[:images][0].to_s + assert_nil sect.document.catalog[:images][0].imagesdir + assert logger.empty? + end end test 'an icon macro should be interpreted as an icon if icons are enabled' do - para = block_from_string 'icon:github[]', :attributes => {'icons' => ''} + para = block_from_string 'icon:github[]', attributes: { 'icons' => '' } assert_equal %{github}, para.sub_macros(para.source).gsub(/>\s+<') end @@ -747,178 +873,258 @@ assert_equal %{[github]}, para.sub_macros(para.source).gsub(/>\s+<') end - test 'an icon macro should render alt text if icons are disabled and alt is given' do + test 'an icon macro should output alt text if icons are disabled and alt is given' do para = block_from_string 'icon:github[alt="GitHub"]' assert_equal %{[GitHub]}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should be interpreted as a font-based icon when icons=font' do - para = block_from_string 'icon:github[]', :attributes => {'icons' => 'font'} + para = block_from_string 'icon:github[]', attributes: { 'icons' => 'font' } assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with a size should be interpreted as a font-based icon with a size when icons=font' do - para = block_from_string 'icon:github[4x]', :attributes => {'icons' => 'font'} + para = block_from_string 'icon:github[4x]', attributes: { 'icons' => 'font' } assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with a role and title should be interpreted as a font-based icon with a class and title when icons=font' do - para = block_from_string 'icon:heart[role="red", title="Heart me"]', :attributes => {'icons' => 'font'} + para = block_from_string 'icon:heart[role="red", title="Heart me"]', attributes: { 'icons' => 'font' } assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end - test 'a single-line footnote macro should be registered and rendered as a footnote' do + test 'a single-line footnote macro should be registered and output as a footnote' do para = block_from_string('Sentence text footnote:[An example footnote.].') - assert_equal %(Sentence text [1].), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote = para.document.references[:footnotes].first + assert_equal %(Sentence text [1].), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index - assert footnote.id.nil? 
+ assert_nil footnote.id assert_equal 'An example footnote.', footnote.text end - test 'a multi-line footnote macro should be registered and rendered as a footnote without endline' do + test 'a multi-line footnote macro should be registered and output as a footnote without newline' do para = block_from_string("Sentence text footnote:[An example footnote\nwith wrapped text.].") - assert_equal %(Sentence text [1].), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote = para.document.references[:footnotes].first + assert_equal %(Sentence text [1].), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index - assert footnote.id.nil? + assert_nil footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end - test 'an escaped closing square bracket in a footnote should be unescaped when rendered' do + test 'an escaped closing square bracket in a footnote should be unescaped when converted' do para = block_from_string(%(footnote:[a #{BACKSLASH}] b].)) - assert_equal %([1].), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote = para.document.references[:footnotes].first + assert_equal %([1].), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote = para.document.catalog[:footnotes].first assert_equal "a ] b", footnote.text end test 'a footnote macro can be directly adjacent to preceding word' do para = block_from_string('Sentence textfootnote:[An example footnote.].') - assert_equal %(Sentence text[1].), para.sub_macros(para.source) + assert_equal %(Sentence text[1].), para.sub_macros(para.source) end test 'a footnote macro may contain an escaped backslash' do para = block_from_string("footnote:[\\]]\nfootnote:[a \\] b]\nfootnote:[a \\]\\] b]") para.sub_macros(para.source) - assert_equal 3, para.document.references[:footnotes].size - footnote1 = para.document.references[:footnotes][0] + assert_equal 3, para.document.catalog[:footnotes].size + footnote1 = para.document.catalog[:footnotes][0] assert_equal ']', footnote1.text - footnote2 = para.document.references[:footnotes][1] + footnote2 = para.document.catalog[:footnotes][1] assert_equal 'a ] b', footnote2.text - footnote3 = para.document.references[:footnotes][2] + footnote3 = para.document.catalog[:footnotes][2] assert_equal 'a ]] b', footnote3.text end test 'a footnote macro may contain a link macro' do - para = block_from_string('Share your code. footnote:[http://github.com[GitHub]]') - assert_equal %(Share your code. [1]), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote1 = para.document.references[:footnotes][0] - assert_equal 'GitHub', footnote1.text + para = block_from_string('Share your code. footnote:[https://github.com[GitHub]]') + assert_equal %(Share your code. [1]), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote1 = para.document.catalog[:footnotes][0] + assert_equal 'GitHub', footnote1.text end test 'a footnote macro may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2]\nlibrary.) 
result = para.sub_macros para.source - assert_equal %(the JLine [1]\nlibrary.), result - assert_equal 1, para.document.references[:footnotes].size - fn1 = para.document.references[:footnotes].first + assert_equal %(the JLine [1]\nlibrary.), result + assert_equal 1, para.document.catalog[:footnotes].size + fn1 = para.document.catalog[:footnotes].first assert_equal 'https://github.com/jline/jline2', fn1.text end test 'a footnote macro followed by a semi-colon may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2];\nlibrary.) result = para.sub_macros para.source - assert_equal %(the JLine [1];\nlibrary.), result - assert_equal 1, para.document.references[:footnotes].size - fn1 = para.document.references[:footnotes].first + assert_equal %(the JLine [1];\nlibrary.), result + assert_equal 1, para.document.catalog[:footnotes].size + fn1 = para.document.catalog[:footnotes].first assert_equal 'https://github.com/jline/jline2', fn1.text end - test 'a footnote macro may contain an xref macro' do + test 'a footnote macro may contain a shorthand xref' do # specialcharacters escaping is simulated - para = block_from_string('text footnote:[<<_install,Install>>]') - assert_equal %(text [1]), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote1 = para.document.references[:footnotes][0] - assert_equal 'Install', footnote1.text + para = block_from_string('text footnote:[<<_install,install>>]') + doc = para.document + doc.register :refs, ['_install', (Asciidoctor::Inline.new doc, :anchor, 'Install', type: :ref, target: '_install'), 'Install'] + catalog = doc.catalog + assert_equal %(text [1]), para.sub_macros(para.source) + assert_equal 1, catalog[:footnotes].size + footnote1 = catalog[:footnotes][0] + assert_equal 'install', footnote1.text + end + + test 'a footnote macro may contain an xref macro' do + para = block_from_string('text footnote:[xref:_install[install]]') + doc = para.document + doc.register :refs, ['_install', (Asciidoctor::Inline.new doc, :anchor, 'Install', type: :ref, target: '_install'), 'Install'] + catalog = doc.catalog + assert_equal %(text [1]), para.sub_macros(para.source) + assert_equal 1, catalog[:footnotes].size + footnote1 = catalog[:footnotes][0] + assert_equal 'install', footnote1.text end test 'a footnote macro may contain an anchor macro' do - para = block_from_string('text footnote:[a [[b\]\] \[[c\]\] d]') - assert_equal %(text [1]), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote1 = para.document.references[:footnotes][0] + para = block_from_string('text footnote:[a [[b]] [[c\]\] d]') + assert_equal %(text [1]), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote1 = para.document.catalog[:footnotes][0] assert_equal 'a [[c]] d', footnote1.text end test 'subsequent footnote macros with escaped URLs should be restored in DocBook' do - input = <<-EOS -foofootnote:[+http://example.com+]barfootnote:[+http://acme.com+]baz - EOS + input = 'foofootnote:[+http://example.com+]barfootnote:[+http://acme.com+]baz' - result = render_embedded_string input, :doctype => 'inline', :backend => 'docbook' + result = convert_string_to_embedded input, doctype: 'inline', backend: 'docbook' assert_equal 'foohttp://example.combarhttp://acme.combaz', result end - test 'a footnote macro may contain a bibliographic anchor macro' do - para = block_from_string('text footnote:[a [[[b\]\]\] c]') - assert_equal 
%(text [1]), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote1 = para.document.references[:footnotes][0] - assert_equal 'a [b] c', footnote1.text - end - test 'should increment index of subsequent footnote macros' do para = block_from_string("Sentence text footnote:[An example footnote.]. Sentence text footnote:[Another footnote.].") - assert_equal %(Sentence text [1]. Sentence text [2].), para.sub_macros(para.source) - assert_equal 2, para.document.references[:footnotes].size - footnote1 = para.document.references[:footnotes][0] + assert_equal %(Sentence text [1]. Sentence text [2].), para.sub_macros(para.source) + assert_equal 2, para.document.catalog[:footnotes].size + footnote1 = para.document.catalog[:footnotes][0] assert_equal 1, footnote1.index - assert footnote1.id.nil? + assert_nil footnote1.id assert_equal "An example footnote.", footnote1.text - footnote2 = para.document.references[:footnotes][1] + footnote2 = para.document.catalog[:footnotes][1] assert_equal 2, footnote2.index - assert footnote2.id.nil? + assert_nil footnote2.id assert_equal "Another footnote.", footnote2.text end - test 'a footnoteref macro with id and single-line text should be registered and rendered as a footnote' do - para = block_from_string('Sentence text footnoteref:[ex1, An example footnote.].') - assert_equal %(Sentence text [1].), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote = para.document.references[:footnotes].first + test 'a footnoteref macro with id and single-line text should be registered and output as a footnote' do + para = block_from_string 'Sentence text footnoteref:[ex1, An example footnote.].', attributes: { 'compat-mode' => '' } + assert_equal %(Sentence text [1].), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end - test 'a footnoteref macro with id and multi-line text should be registered and rendered as a footnote without endlines' do - para = block_from_string("Sentence text footnoteref:[ex1, An example footnote\nwith wrapped text.].") - assert_equal %(Sentence text [1].), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote = para.document.references[:footnotes].first + test 'a footnoteref macro with id and multi-line text should be registered and output as a footnote without newlines' do + para = block_from_string "Sentence text footnoteref:[ex1, An example footnote\nwith wrapped text.].", attributes: { 'compat-mode' => '' } + assert_equal %(Sentence text [1].), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end test 'a footnoteref macro with id should refer to footnoteref with same id' do - para = block_from_string('Sentence text footnoteref:[ex1, An example footnote.]. Sentence text footnoteref:[ex1].') - assert_equal %(Sentence text [1]. Sentence text [1].), para.sub_macros(para.source) - assert_equal 1, para.document.references[:footnotes].size - footnote = para.document.references[:footnotes].first + para = block_from_string 'Sentence text footnoteref:[ex1, An example footnote.]. 
Sentence text footnoteref:[ex1].', attributes: { 'compat-mode' => '' } + assert_equal %(Sentence text [1]. Sentence text [1].), para.sub_macros(para.source) + assert_equal 1, para.document.catalog[:footnotes].size + footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end - test 'an unresolved footnoteref should not crash the processor' do - para = block_from_string('Sentence text footnoteref:[ex1].') - para.sub_macros para.source + test 'an unresolved footnote reference should produce a warning message' do + input = 'Sentence text.footnote:ex1[]' + using_memory_logger do |logger| + para = block_from_string input + para.sub_macros para.source + assert_message logger, :WARN, 'invalid footnote reference: ex1' + end + end + + test 'using a footnoteref macro should generate a warning when compat mode is not enabled' do + input = 'Sentence text.footnoteref:[fn1,Commentary on this sentence.]' + using_memory_logger do |logger| + para = block_from_string input + para.sub_macros para.source + assert_message logger, :WARN, 'found deprecated footnoteref macro: footnoteref:[fn1,Commentary on this sentence.]; use footnote macro with target instead' + end + end + + test 'inline footnote macro can be used to define and reference a footnote reference' do + input = <<~'EOS' + You can download the software from the product page.footnote:sub[Option only available if you have an active subscription.] + + You can also file a support request.footnote:sub[] + + If all else fails, you can give us a call.footnoteref:[sub] + EOS + + using_memory_logger do |logger| + output = convert_string_to_embedded input, attributes: { 'compat-mode' => '' } + assert_css '#_footnotedef_1', output, 1 + assert_css 'p a[href="#_footnotedef_1"]', output, 3 + assert_css '#footnotes .footnote', output, 1 + assert logger.empty? + end + end + + test 'should parse multiple footnote references in a single line' do + input = 'notable text.footnote:id[about this [text\]], footnote:id[], footnote:id[]' + output = convert_string_to_embedded input + assert_xpath '(//p)[1]/sup[starts-with(@class,"footnote")]', output, 3 + assert_xpath '(//p)[1]/sup[@class="footnote"]', output, 1 + assert_xpath '(//p)[1]/sup[@class="footnoteref"]', output, 2 + assert_xpath '(//p)[1]/sup[starts-with(@class,"footnote")]/a[@class="footnote"][text()="1"]', output, 3 + assert_css '#footnotes .footnote', output, 1 + end + + test 'should not resolve an inline footnote macro missing both id and text' do + input = <<~'EOS' + The footnote:[] macro can be used for defining and referencing footnotes. + + The footnoteref:[] macro is now deprecated. 
+ EOS + + output = convert_string_to_embedded input + assert_includes output, 'The footnote:[] macro' + assert_includes output, 'The footnoteref:[] macro' + end + + test 'inline footnote macro can define a numeric id without conflicting with auto-generated ID' do + input = 'You can download the software from the product page.footnote:1[Option only available if you have an active subscription.]' + + output = convert_string_to_embedded input + assert_css '#_footnote_1', output, 1 + assert_css 'p sup#_footnote_1', output, 1 + assert_css 'p a#_footnoteref_1', output, 1 + assert_css 'p a[href="#_footnotedef_1"]', output, 1 + assert_css '#footnotes #_footnotedef_1', output, 1 + end + + test 'inline footnote macro can define an id that uses any word characters in Unicode' do + input = <<~'EOS' + L'origine du mot forêt{blank}footnote:forêt[un massif forestier] est complexe. + + Qu'est-ce qu'une forêt ?{blank}footnote:forêt[] + EOS + output = convert_string_to_embedded input + assert_css '#_footnote_forêt', output, 1 + assert_css '#_footnotedef_1', output, 1 + assert_xpath '//a[@class="footnote"][text()="1"]', output, 2 end test 'a single-line index term macro with a primary term should be registered as an index reference' do @@ -928,8 +1134,8 @@ para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Tigers'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Tigers'], para.document.catalog[:indexterms].first end end @@ -940,8 +1146,8 @@ para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Big cats', 'Tigers'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Big cats', 'Tigers'], para.document.catalog[:indexterms].first end end @@ -952,8 +1158,8 @@ para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Big cats', 'Tigers', 'Panthera tigris'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Big cats', 'Tigers', 'Panthera tigris'], para.document.catalog[:indexterms].first end end @@ -964,33 +1170,51 @@ para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Panthera tigris'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Panthera tigris'], para.document.catalog[:indexterms].first end end + test 'should escape concealed index term if second bracket is preceded by a backslash' do + input = %[National Institute of Science and Technology (#{BACKSLASH}((NIST)))] + doc = document_from_string input, standalone: false + output = doc.convert + assert_xpath '//p[text()="National Institute of Science and Technology (((NIST)))"]', output, 1 + #assert doc.catalog[:indexterms].empty? 
+ end + + test 'should only escape enclosing brackets if concealed index term is preceded by a backslash' do + input = %[National Institute of Science and Technology #{BACKSLASH}(((NIST)))] + doc = document_from_string input, standalone: false + output = doc.convert + assert_xpath '//p[text()="National Institute of Science and Technology (NIST)"]', output, 1 + #term = doc.catalog[:indexterms].first + #assert_equal 1, term.size + #assert_equal 'NIST', term.first + end + test 'should not split index terms on commas inside of quoted terms' do inputs = [] - inputs.push <<-EOS -Tigers are big, scary cats. -indexterm:[Tigers, "[Big\\], -scary cats"] -EOS - inputs.push <<-EOS -Tigers are big, scary cats. -(((Tigers, "[Big], -scary cats"))) -EOS + inputs.push <<~'EOS' + Tigers are big, scary cats. + indexterm:[Tigers, "[Big\], + scary cats"] + EOS + inputs.push <<~'EOS' + Tigers are big, scary cats. + (((Tigers, "[Big], + scary cats"))) + EOS inputs.each do |input| para = block_from_string input output = para.sub_macros(para.source) assert_equal input.lines.first, output - assert_equal 1, para.document.references[:indexterms].size - terms = para.document.references[:indexterms].first - assert_equal 2, terms.size - assert_equal 'Tigers', terms.first - assert_equal '[Big], scary cats', terms.last + #assert_equal 1, para.document.catalog[:indexterms].size + #terms = para.document.catalog[:indexterms].first + #assert_equal 2, terms.size + #assert_equal 'Tigers', terms.first + #assert_equal '[Big], scary cats', terms.last end end @@ -999,10 +1223,10 @@ macros = ['indexterm:[*Tigers*]', '(((*Tigers*)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") - output = para.apply_normal_subs(para.lines) + output = para.apply_subs(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Tigers'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Tigers'], para.document.catalog[:indexterms].first end end @@ -1012,9 +1236,9 @@ para = block_from_string("#{sentence}\n#{macros}") output = para.sub_macros(para.source) assert_equal sentence, output.rstrip - assert_equal 2, para.document.references[:indexterms].size - assert_equal ['Tigers'], para.document.references[:indexterms][0] - assert_equal ['Animals', 'Cats'], para.document.references[:indexterms][1] + #assert_equal 2, para.document.catalog[:indexterms].size + #assert_equal ['Tigers'], para.document.catalog[:indexterms][0] + #assert_equal ['Animals', 'Cats'], para.document.catalog[:indexterms][1] end test 'an index term macro with round bracket syntax may contain round brackets in term' do @@ -1023,8 +1247,40 @@ para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Tiger (Panthera tigris)'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Tiger (Panthera tigris)'], para.document.catalog[:indexterms].first + end + + test 'visible shorthand index term macro should not consume trailing round bracket' do + input = '(text with ((index term)))' + expected = <<~'EOS'.chop + (text with + index term + index term) + EOS + #expected_term = ['index term'] + para = block_from_string input, backend: :docbook + output = para.sub_macros para.source + assert_equal expected, output + #indexterms_table = 
para.document.catalog[:indexterms] + #assert_equal 1, indexterms_table.size + #assert_equal expected_term, indexterms_table[0] + end + + test 'visible shorthand index term macro should not consume leading round bracket' do + input = '(((index term)) for text)' + expected = <<~'EOS'.chop + ( + index term + index term for text) + EOS + #expected_term = ['index term'] + para = block_from_string input, backend: :docbook + output = para.sub_macros para.source + assert_equal expected, output + #indexterms_table = para.document.catalog[:indexterms] + #assert_equal 1, indexterms_table.size + #assert_equal expected_term, indexterms_table[0] end test 'an index term macro with square bracket syntax may contain square brackets in term' do @@ -1033,8 +1289,8 @@ para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['Tiger [Panthera tigris]'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['Tiger [Panthera tigris]'], para.document.catalog[:indexterms].first end test 'a single-line index term 2 macro should be registered as an index reference and retain term inline' do @@ -1044,8 +1300,8 @@ para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['tiger'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['tiger'], para.document.catalog[:indexterms].first end end @@ -1056,8 +1312,8 @@ para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['panthera tigris'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['panthera tigris'], para.document.catalog[:indexterms].first end end @@ -1066,18 +1322,26 @@ para = block_from_string(sentence) output = para.sub_macros(para.source) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output - assert_equal 2, para.document.references[:indexterms].size - assert_equal ['tiger'], para.document.references[:indexterms][0] - assert_equal ['cat'], para.document.references[:indexterms][1] + #assert_equal 2, para.document.catalog[:indexterms].size + #assert_equal ['tiger'], para.document.catalog[:indexterms][0] + #assert_equal ['cat'], para.document.catalog[:indexterms][1] + end + + test 'should escape visible index term if preceded by a backslash' do + sentence = "The #{BACKSLASH}((tiger)) (Panthera tigris) is the largest #{BACKSLASH}((cat)) species." + para = block_from_string(sentence) + output = para.sub_macros(para.source) + assert_equal 'The ((tiger)) (Panthera tigris) is the largest ((cat)) species.', output + #assert para.document.catalog[:indexterms].empty? end test 'normal substitutions are performed on an index term 2 macro' do sentence = 'The ((*tiger*)) (Panthera tigris) is the largest cat species.' 
para = block_from_string sentence - output = para.apply_normal_subs(para.lines) + output = para.apply_subs(para.source) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output - assert_equal 1, para.document.references[:indexterms].size - assert_equal ['tiger'], para.document.references[:indexterms].first + #assert_equal 1, para.document.catalog[:indexterms].size + #assert_equal ['tiger'], para.document.catalog[:indexterms].first end test 'index term 2 macro with round bracket syntex should not interfer with index term macro with round bracket syntax' do @@ -1085,136 +1349,296 @@ para = block_from_string sentence output = para.sub_macros(para.source) assert_equal "The panthera tigris is the largest cat species.\n", output - terms = para.document.references[:indexterms] - assert_equal 2, terms.size - assert_equal ['panthera tigris'], terms[0] - assert_equal ['Big cats', 'Tigers'], terms[1] + #terms = para.document.catalog[:indexterms] + #assert_equal 2, terms.size + #assert_equal ['panthera tigris'], terms[0] + #assert_equal ['Big cats', 'Tigers'], terms[1] + end + + test 'should parse visible shorthand index term with see and seealso' do + sentence = '((Flash >> HTML 5)) has been supplanted by ((HTML 5 &> CSS 3 &> SVG)).' + output = convert_string_to_embedded sentence, backend: 'docbook' + indexterm_flash = <<~'EOS'.chop + + Flash + HTML 5 + + EOS + indexterm_html5 = <<~'EOS'.chop + + HTML 5 + CSS 3 + SVG + + EOS + assert_includes output, indexterm_flash + assert_includes output, indexterm_html5 + end + + test 'should parse concealed shorthand index term with see and seealso' do + sentence = 'Flash(((Flash >> HTML 5))) has been supplanted by HTML 5(((HTML 5 &> CSS 3 &> SVG))).' + output = convert_string_to_embedded sentence, backend: 'docbook' + indexterm_flash = <<~'EOS'.chop + + Flash + HTML 5 + + EOS + indexterm_html5 = <<~'EOS'.chop + + HTML 5 + CSS 3 + SVG + + EOS + assert_includes output, indexterm_flash + assert_includes output, indexterm_html5 + end + + test 'should parse visible index term macro with see and seealso' do + sentence = 'indexterm2:[Flash,see=HTML 5] has been supplanted by indexterm2:[HTML 5,see-also="CSS 3, SVG"].' + output = convert_string_to_embedded sentence, backend: 'docbook' + indexterm_flash = <<~'EOS'.chop + + Flash + HTML 5 + + EOS + indexterm_html5 = <<~'EOS'.chop + + HTML 5 + CSS 3 + SVG + + EOS + assert_includes output, indexterm_flash + assert_includes output, indexterm_html5 + end + + test 'should parse concealed index term macro with see and seealso' do + sentence = 'Flashindexterm:[Flash,see=HTML 5] has been supplanted by HTML 5indexterm:[HTML 5,see-also="CSS 3, SVG"].' 
+ output = convert_string_to_embedded sentence, backend: 'docbook' + indexterm_flash = <<~'EOS'.chop + + Flash + HTML 5 + + EOS + indexterm_html5 = <<~'EOS'.chop + + HTML 5 + CSS 3 + SVG + + EOS + assert_includes output, indexterm_flash + assert_includes output, indexterm_html5 end context 'Button macro' do test 'btn macro' do - para = block_from_string('btn:[Save]', :attributes => {'experimental' => ''}) + para = block_from_string('btn:[Save]', attributes: { 'experimental' => '' }) assert_equal %q{Save}, para.sub_macros(para.source) end + test 'btn macro that spans multiple lines' do + para = block_from_string(%(btn:[Rebase and\nmerge]), attributes: { 'experimental' => '' }) + assert_equal %q{Rebase and merge}, para.sub_macros(para.source) + end + test 'btn macro for docbook backend' do - para = block_from_string('btn:[Save]', :backend => 'docbook', :attributes => {'experimental' => ''}) + para = block_from_string('btn:[Save]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{Save}, para.sub_macros(para.source) end end context 'Keyboard macro' do test 'kbd macro with single key' do - para = block_from_string('kbd:[F3]', :attributes => {'experimental' => ''}) + para = block_from_string('kbd:[F3]', attributes: { 'experimental' => '' }) assert_equal %q{F3}, para.sub_macros(para.source) end + test 'kbd macro with single backslash key' do + para = block_from_string("kbd:[#{BACKSLASH} ]", attributes: { 'experimental' => '' }) + assert_equal %q(\), para.sub_macros(para.source) + end + test 'kbd macro with single key, docbook backend' do - para = block_from_string('kbd:[F3]', :backend => 'docbook', :attributes => {'experimental' => ''}) + para = block_from_string('kbd:[F3]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{F3}, para.sub_macros(para.source) end test 'kbd macro with key combination' do - para = block_from_string('kbd:[Ctrl+Shift+T]', :attributes => {'experimental' => ''}) + para = block_from_string('kbd:[Ctrl+Shift+T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end - test 'kbd macro with key combination with spaces' do - para = block_from_string('kbd:[Ctrl + Shift + T]', :attributes => {'experimental' => ''}) + test 'kbd macro with key combination that spans multiple lines' do + para = block_from_string(%(kbd:[Ctrl +\nT]), attributes: { 'experimental' => '' }) + assert_equal %q{Ctrl+T}, para.sub_macros(para.source) + end + + test 'kbd macro with key combination, docbook backend' do + para = block_from_string('kbd:[Ctrl+Shift+T]', backend: 'docbook', attributes: { 'experimental' => '' }) + assert_equal %q{CtrlShiftT}, para.sub_macros(para.source) + end + + test 'kbd macro with key combination delimited by pluses with spaces' do + para = block_from_string('kbd:[Ctrl + Shift + T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas' do - para = block_from_string('kbd:[Ctrl,Shift,T]', :attributes => {'experimental' => ''}) + para = block_from_string('kbd:[Ctrl,Shift,T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end - test 'kbd macro with key combination containing a plus key no spaces' do - para = block_from_string('kbd:[Ctrl++]', :attributes => {'experimental' => ''}) - assert_equal %q{Ctrl++}, para.sub_macros(para.source) + test 'kbd macro with key combination delimited by commas with spaces' do + para = 
block_from_string('kbd:[Ctrl, Shift, T]', attributes: { 'experimental' => '' }) + assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end - test 'kbd macro with key combination delimited by commands containing a comma key' do - para = block_from_string('kbd:[Ctrl,,]', :attributes => {'experimental' => ''}) + test 'kbd macro with key combination delimited by plus containing a comma key' do + para = block_from_string('kbd:[Ctrl+,]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+,}, para.sub_macros(para.source) end - test 'kbd macro with key combination containing a plus key with spaces' do - para = block_from_string('kbd:[Ctrl + +]', :attributes => {'experimental' => ''}) + test 'kbd macro with key combination delimited by commas containing a plus key' do + para = block_from_string('kbd:[Ctrl, +, Shift]', attributes: { 'experimental' => '' }) + assert_equal %q{Ctrl+++Shift}, para.sub_macros(para.source) + end + + test 'kbd macro with key combination where last key matches plus delimiter' do + para = block_from_string('kbd:[Ctrl + +]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl++}, para.sub_macros(para.source) end + test 'kbd macro with key combination where last key matches comma delimiter' do + para = block_from_string('kbd:[Ctrl, ,]', attributes: { 'experimental' => '' }) + assert_equal %q{Ctrl+,}, para.sub_macros(para.source) + end + test 'kbd macro with key combination containing escaped bracket' do - para = block_from_string('kbd:[Ctrl + \]]', :attributes => {'experimental' => ''}) + para = block_from_string('kbd:[Ctrl + \]]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+]}, para.sub_macros(para.source) end - test 'kbd macro with key combination, docbook backend' do - para = block_from_string('kbd:[Ctrl+Shift+T]', :backend => 'docbook', :attributes => {'experimental' => ''}) - assert_equal %q{CtrlShiftT}, para.sub_macros(para.source) + test 'kbd macro with key combination ending in backslash' do + para = block_from_string("kbd:[Ctrl + #{BACKSLASH} ]", attributes: { 'experimental' => '' }) + assert_equal %q(Ctrl+\\), para.sub_macros(para.source) + end + + test 'kbd macro looks for delimiter beyond first character' do + para = block_from_string('kbd:[,te]', attributes: { 'experimental' => '' }) + assert_equal %q(,te), para.sub_macros(para.source) + end + + test 'kbd macro restores trailing delimiter as key value' do + para = block_from_string('kbd:[te,]', attributes: { 'experimental' => '' }) + assert_equal %q(te,), para.sub_macros(para.source) end end context 'Menu macro' do test 'should process menu using macro sytnax' do - para = block_from_string('menu:File[]', :attributes => {'experimental' => ''}) - assert_equal %q{File}, para.sub_macros(para.source) + para = block_from_string('menu:File[]', attributes: { 'experimental' => '' }) + assert_equal %q{File}, para.sub_macros(para.source) end test 'should process menu for docbook backend' do - para = block_from_string('menu:File[]', :backend => 'docbook', :attributes => {'experimental' => ''}) + para = block_from_string('menu:File[]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{File}, para.sub_macros(para.source) end + test 'should process multiple menu macros in same line' do + para = block_from_string('menu:File[] and menu:Edit[]', attributes: { 'experimental' => '' }) + assert_equal 'File and Edit', para.sub_macros(para.source) + end + test 'should process menu with menu item using macro syntax' do - para = block_from_string('menu:File[Save As…]', 
:attributes => {'experimental' => ''}) - assert_equal %q{File ▸ Save As…}, para.sub_macros(para.source) + para = block_from_string('menu:File[Save As…]', attributes: { 'experimental' => '' }) + assert_equal %q{File  Save As…}, para.sub_macros(para.source) + end + + test 'should process menu macro that spans multiple lines' do + input = %(menu:Preferences[Compile\non\nSave]) + para = block_from_string input, attributes: { 'experimental' => '' } + assert_equal %(Preferences  Compile\non\nSave), para.sub_macros(para.source) + end + + test 'should unescape escaped closing bracket in menu macro' do + input = 'menu:Preferences[Compile [on\\] Save]' + para = block_from_string input, attributes: { 'experimental' => '' } + assert_equal %q(Preferences  Compile [on] Save), para.sub_macros(para.source) + end + + test 'should process menu with menu item using macro syntax when fonts icons are enabled' do + para = block_from_string('menu:Tools[More Tools > Extensions]', attributes: { 'experimental' => '', 'icons' => 'font' }) + assert_equal %q{Tools  More Tools  Extensions}, para.sub_macros(para.source) end test 'should process menu with menu item for docbook backend' do - para = block_from_string('menu:File[Save As…]', :backend => 'docbook', :attributes => {'experimental' => ''}) + para = block_from_string('menu:File[Save As…]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{File Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax' do - para = block_from_string('menu:Tools[Project > Build]', :attributes => {'experimental' => ''}) - assert_equal %q{Tools ▸ Project ▸ Build}, para.sub_macros(para.source) + para = block_from_string('menu:Tools[Project > Build]', attributes: { 'experimental' => '' }) + assert_equal %q{Tools  Project  Build}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu for docbook backend' do - para = block_from_string('menu:Tools[Project > Build]', :backend => 'docbook', :attributes => {'experimental' => ''}) + para = block_from_string('menu:Tools[Project > Build]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{Tools Project Build}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax and comma delimiter' do - para = block_from_string('menu:Tools[Project, Build]', :attributes => {'experimental' => ''}) - assert_equal %q{Tools ▸ Project ▸ Build}, para.sub_macros(para.source) + para = block_from_string('menu:Tools[Project, Build]', attributes: { 'experimental' => '' }) + assert_equal %q{Tools  Project  Build}, para.sub_macros(para.source) end test 'should process menu with menu item using inline syntax' do - para = block_from_string('"File > Save As…"', :attributes => {'experimental' => ''}) - assert_equal %q{File ▸ Save As…}, para.sub_macros(para.source) + para = block_from_string('"File > Save As…"', attributes: { 'experimental' => '' }) + assert_equal %q{File  Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using inline syntax' do - para = block_from_string('"Tools > Project > Build"', :attributes => {'experimental' => ''}) - assert_equal %q{Tools ▸ Project ▸ Build}, para.sub_macros(para.source) + para = block_from_string('"Tools > Project > Build"', attributes: { 'experimental' => '' }) + assert_equal %q{Tools  Project  Build}, para.sub_macros(para.source) end - test 'inline syntax should not closing quote of XML attribute' do - para = 
block_from_string('<node>r', :attributes => {'experimental' => ''}) + test 'inline menu syntax should not match closing quote of XML attribute' do + para = block_from_string('<node>r', attributes: { 'experimental' => '' }) assert_equal %q{<node>r}, para.sub_macros(para.source) end test 'should process menu macro with items containing multibyte characters' do - para = block_from_string('menu:视图[放大, 重置]', :attributes => {'experimental' => ''}) - assert_equal %q{视图 ▸ 放大 ▸ 重置}, para.sub_macros(para.source) - end if ::RUBY_MIN_VERSION_1_9 + para = block_from_string('menu:视图[放大, 重置]', attributes: { 'experimental' => '' }) + assert_equal %q{视图  放大  重置}, para.sub_macros(para.source) + end test 'should process inline menu with items containing multibyte characters' do - para = block_from_string('"视图 > 放大 > 重置"', :attributes => {'experimental' => ''}) - assert_equal %q{视图 ▸ 放大 ▸ 重置}, para.sub_macros(para.source) - end if ::RUBY_MIN_VERSION_1_9 + para = block_from_string('"视图 > 放大 > 重置"', attributes: { 'experimental' => '' }) + assert_equal %q{视图  放大  重置}, para.sub_macros(para.source) + end + + test 'should process a menu macro with a target that begins with a character reference' do + para = block_from_string('menu:⋮[More Tools, Extensions]', attributes: { 'experimental' => '' }) + assert_equal %q{  More Tools  Extensions}, para.sub_macros(para.source) + end + + test 'should not process a menu macro with a target that ends with a space' do + input = 'menu:foo [bar] menu:File[Save]' + para = block_from_string input, attributes: { 'experimental' => '' } + result = para.sub_macros para.source + assert_xpath '/span[@class="menuseq"]', result, 1 + assert_xpath '//b[@class="menu"][text()="File"]', result, 1 + end + + test 'should process an inline menu that begins with a character reference' do + para = block_from_string('"⋮ > More Tools > Extensions"', attributes: { 'experimental' => '' }) + assert_equal %q{  More Tools  Extensions}, para.sub_macros(para.source) + end end end @@ -1222,88 +1646,152 @@ test 'collect inline triple plus passthroughs' do para = block_from_string('+++inline code+++') result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal 'inline code', para.passthroughs[0][:text] - assert para.passthroughs[0][:subs].empty? + assert_equal 1, passthroughs.size + assert_equal 'inline code', passthroughs[0][:text] + assert_empty passthroughs[0][:subs] end test 'collect multi-line inline triple plus passthroughs' do para = block_from_string("+++inline\ncode+++") result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal "inline\ncode", para.passthroughs[0][:text] - assert para.passthroughs[0][:subs].empty? 
+ assert_equal 1, passthroughs.size + assert_equal "inline\ncode", passthroughs[0][:text] + assert_empty passthroughs[0][:subs] end test 'collect inline double dollar passthroughs' do para = block_from_string('$${code}$$') result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal '{code}', para.passthroughs[0][:text] - assert_equal [:specialcharacters], para.passthroughs[0][:subs] + assert_equal 1, passthroughs.size + assert_equal '{code}', passthroughs[0][:text] + assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect inline double plus passthroughs' do para = block_from_string('++{code}++') result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal '{code}', para.passthroughs[0][:text] - assert_equal [:specialcharacters], para.passthroughs[0][:subs] + assert_equal 1, passthroughs.size + assert_equal '{code}', passthroughs[0][:text] + assert_equal [:specialcharacters], passthroughs[0][:subs] + end + + test 'should not crash if role on passthrough is enclosed in quotes' do + %W( + ['role']#{BACKSLASH}++This++++++++++++ + ['role']#{BACKSLASH}+++++++++This++++++++++++ + ).each do |input| + para = block_from_string input + assert_includes para.content, %() + end + end + + test 'should allow inline double plus passthrough to be escaped using backslash' do + para = block_from_string("you need to replace `int a = n#{BACKSLASH}++;` with `int a = ++n;`!") + result = para.apply_subs para.source + assert_equal 'you need to replace int a = n++; with int a = ++n;!', result + end + + test 'should allow inline double plus passthrough with attributes to be escaped using backslash' do + para = block_from_string("=[attrs]#{BACKSLASH}#{BACKSLASH}++text++") + result = para.apply_subs para.source + assert_equal '=[attrs]++text++', result end test 'collect multi-line inline double dollar passthroughs' do para = block_from_string("$$\n{code}\n$$") result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal "\n{code}\n", para.passthroughs[0][:text] - assert_equal [:specialcharacters], para.passthroughs[0][:subs] + assert_equal 1, passthroughs.size + assert_equal "\n{code}\n", passthroughs[0][:text] + assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect multi-line inline double plus passthroughs' do para = block_from_string("++\n{code}\n++") result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal "\n{code}\n", para.passthroughs[0][:text] - assert_equal [:specialcharacters], para.passthroughs[0][:subs] + assert_equal 1, passthroughs.size + assert_equal "\n{code}\n", passthroughs[0][:text] + assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect passthroughs from inline pass macro' do para = 
block_from_string(%Q{pass:specialcharacters,quotes[['code'\\]]}) result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal %q{['code']}, para.passthroughs[0][:text] - assert_equal [:specialcharacters, :quotes], para.passthroughs[0][:subs] + assert_equal 1, passthroughs.size + assert_equal %q{['code']}, passthroughs[0][:text] + assert_equal [:specialcharacters, :quotes], passthroughs[0][:subs] end test 'collect multi-line passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[['more\ncode'\\]]}) result = para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result - assert_equal 1, para.passthroughs.size - assert_equal %Q{['more\ncode']}, para.passthroughs[0][:text] - assert_equal [:specialcharacters, :quotes], para.passthroughs[0][:subs] + assert_equal 1, passthroughs.size + assert_equal %Q{['more\ncode']}, passthroughs[0][:text] + assert_equal [:specialcharacters, :quotes], passthroughs[0][:subs] + end + + test 'should find and replace placeholder duplicated by substitution' do + input = %q(+first passthrough+ followed by link:$$http://example.com/__u_no_format_me__$$[] with passthrough) + result = convert_inline_string input + assert_equal 'first passthrough followed by http://example.com/__u_no_format_me__ with passthrough', result end test 'resolves sub shorthands on inline pass macro' do para = block_from_string 'pass:q,a[*<{backend}>*]' result = para.extract_passthroughs para.source - assert_equal 1, para.passthroughs.size - assert_equal [:quotes, :attributes], para.passthroughs[0][:subs] + passthroughs = para.instance_variable_get :@passthroughs + assert_equal 1, passthroughs.size + assert_equal [:quotes, :attributes], passthroughs[0][:subs] result = para.restore_passthroughs result assert_equal '', result end + test 'inline pass macro supports incremental subs' do + para = block_from_string 'pass:n,-a[<{backend}>]' + result = para.extract_passthroughs para.source + passthroughs = para.instance_variable_get :@passthroughs + assert_equal 1, passthroughs.size + result = para.restore_passthroughs result + assert_equal '<{backend}>', result + end + + test 'should not recognize pass macro with invalid substitution list' do + [',', '42', 'a,'].each do |subs| + para = block_from_string %(pass:#{subs}[foobar]) + result = para.extract_passthroughs para.source + assert_equal %(pass:#{subs}[foobar]), result + end + end + + test 'should allow content of inline pass macro to be empty' do + para = block_from_string 'pass:[]' + result = para.extract_passthroughs para.source + passthroughs = para.instance_variable_get :@passthroughs + assert_equal 1, passthroughs.size + assert_equal '', para.restore_passthroughs(result) + end + # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs without subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study") - para.passthroughs[0] = {:text => 'inline code', :subs => []} + para.extract_passthroughs '' + passthroughs = para.instance_variable_get :@passthroughs + passthroughs[0] = { text: 'inline 
code', subs: [] } result = para.restore_passthroughs(para.source) assert_equal "some inline code to study", result end @@ -1311,34 +1799,43 @@ # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs with subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study in the #{Asciidoctor::Substitutors::PASS_START}" + '1' + "#{Asciidoctor::Substitutors::PASS_END} programming language") - para.passthroughs[0] = {:text => '{code}', :subs => [:specialcharacters]} - para.passthroughs[1] = {:text => '{language}', :subs => [:specialcharacters]} + para.extract_passthroughs '' + passthroughs = para.instance_variable_get :@passthroughs + passthroughs[0] = { text: '{code}', subs: [:specialcharacters] } + passthroughs[1] = { text: '{language}', subs: [:specialcharacters] } result = para.restore_passthroughs(para.source) assert_equal 'some <code>{code}</code> to study in the {language} programming language', result end test 'should restore nested passthroughs' do - result = render_embedded_string %q(+Sometimes you feel pass:q[`mono`].+ Sometimes you +$$don't$$+.), :doctype => :inline + result = convert_inline_string %q(+Sometimes you feel pass:q[`mono`].+ Sometimes you +$$don't$$+.) assert_equal %q(Sometimes you feel mono. Sometimes you don't.), result end + test 'should not fail to restore remaining passthroughs after processing inline passthrough with macro substitution' do + input = 'pass:m[.] pass:[.]' + assert_equal '. .', (convert_inline_string input) + end + test 'should honor role on double plus passthrough' do - result = render_embedded_string 'Print the version using [var]++{asciidoctor-version}++.', :doctype => :inline + result = convert_inline_string 'Print the version using [var]++{asciidoctor-version}++.' 
assert_equal 'Print the version using {asciidoctor-version}.', result end test 'complex inline passthrough macro' do text_to_escape = %q{[(] <'basic form'> <'logical operator'> <'basic form'> [)]} para = block_from_string %($$#{text_to_escape}$$) - result = para.extract_passthroughs(para.source) - assert_equal 1, para.passthroughs.size - assert_equal text_to_escape, para.passthroughs[0][:text] + para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs + assert_equal 1, passthroughs.size + assert_equal text_to_escape, passthroughs[0][:text] text_to_escape_escaped = %q{[(\] <'basic form'> <'logical operator'> <'basic form'> [)\]} para = block_from_string %(pass:specialcharacters[#{text_to_escape_escaped}]) - result = para.extract_passthroughs(para.source) - assert_equal 1, para.passthroughs.size - assert_equal text_to_escape, para.passthroughs[0][:text] + para.extract_passthroughs(para.source) + passthroughs = para.instance_variable_get :@passthroughs + assert_equal 1, passthroughs.size + assert_equal text_to_escape, passthroughs[0][:text] end test 'inline pass macro with a composite sub' do @@ -1365,23 +1862,39 @@ assert_equal '\$a < b\$', para.content end - # NOTE this test doesn't work once AsciiMath has been loaded - #test 'should not perform specialcharacters subs on asciimath macro content in docbook backend by default' do - # input = 'asciimath:[a < b]' - # para = block_from_string input, :backend => :docbook - # para.document.converter.instance_variable_set :@asciimath_available, false - # assert_equal '', para.content - #end + test 'should convert contents of asciimath macro to MathML in DocBook output if asciimath gem is available' do + asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? + input = 'asciimath:[a < b]' + expected = 'a<b' + using_memory_logger do |logger| + para = block_from_string input, backend: :docbook + actual = para.content + if asciimath_available + assert_equal expected, actual + assert_equal :loaded, para.document.converter.instance_variable_get(:@asciimath_status) + else + assert_message logger, :WARN, 'optional gem \'asciimath\' is not available. Functionality disabled.' + assert_equal :unavailable, para.document.converter.instance_variable_get(:@asciimath_status) + end + end + end - test 'should convert asciimath macro content to MathML when asciimath gem is available' do + test 'should not perform specialcharacters subs on asciimath macro content in Docbook output if asciimath gem not available' do + asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? 
input = 'asciimath:[a < b]' - para = block_from_string input, :backend => :docbook - assert_equal 'a<b', para.content + para = block_from_string input, backend: :docbook + para.document.converter.instance_variable_set :@asciimath_status, :unavailable + if asciimath_available + old_asciimath = ::AsciiMath + Object.send :remove_const, 'AsciiMath' + end + assert_equal '', para.content + ::AsciiMath = old_asciimath if asciimath_available end test 'should honor explicit subslist on asciimath macro' do input = 'asciimath:attributes[{expr}]' - para = block_from_string input, :attributes => {'expr' => 'x != 0'} + para = block_from_string input, attributes: { 'expr' => 'x != 0' } assert_equal '\$x != 0\$', para.content end @@ -1391,12 +1904,24 @@ assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end + test 'should strip legacy LaTeX math delimiters around latexmath content if present' do + input = 'latexmath:[$C = \alpha + \beta Y^{\gamma} + \epsilon$]' + para = block_from_string input + assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content + end + test 'should not recognize latexmath macro with no content' do input = 'latexmath:[]' para = block_from_string input assert_equal 'latexmath:[]', para.content end + test 'should unescape escaped square bracket in equation' do + input = 'latexmath:[\sqrt[3\]{x}]' + para = block_from_string input + assert_equal '\(\sqrt[3]{x}\)', para.content + end + test 'should perform specialcharacters subs on latexmath macro in html backend by default' do input = 'latexmath:[a < b]' para = block_from_string input @@ -1405,19 +1930,19 @@ test 'should not perform specialcharacters subs on latexmath macro content in docbook backend by default' do input = 'latexmath:[a < b]' - para = block_from_string input, :backend => :docbook + para = block_from_string input, backend: :docbook assert_equal '', para.content end test 'should honor explicit subslist on latexmath macro' do input = 'latexmath:attributes[{expr}]' - para = block_from_string input, :attributes => {'expr' => '\sqrt{4} = 2'} + para = block_from_string input, attributes: { 'expr' => '\sqrt{4} = 2' } assert_equal '\(\sqrt{4} = 2\)', para.content end test 'should passthrough math macro inside another passthrough' do input = 'the text `asciimath:[x = y]` should be passed through as +literal+ text' - para = block_from_string input, :attributes => {'compat-mode' => ''} + para = block_from_string input, attributes: { 'compat-mode' => '' } assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content input = 'the text [x-]`asciimath:[x = y]` should be passed through as `literal` text' @@ -1429,62 +1954,107 @@ assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content end + test 'should support attrlist on a literal monospace phrase' do + input = '[.baz]`+foo--bar+`' + para = block_from_string input + assert_equal 'foo--bar', para.content + end + + test 'should not process an escaped passthrough macro inside a monospaced phrase' do + input = 'use the `\pass:c[]` macro' + para = block_from_string input + assert_equal 'use the pass:c[] macro', para.content + end + + test 'should not process an escaped passthrough macro inside a monospaced phrase with attributes' do + input = 'use the [syntax]`\pass:c[]` macro' + para = block_from_string input + assert_equal 'use the pass:c[] macro', para.content + end + + test 'should honor an escaped single plus passthrough inside a monospaced phrase' do + input = 'use 
`\+{author}+` to show an attribute reference' + para = block_from_string input + assert_equal 'use +{author}+ to show an attribute reference', para.content + end + test 'should not recognize stem macro with no content' do input = 'stem:[]' para = block_from_string input assert_equal input, para.content end - test 'should passthrough text in stem macro and surround with AsciiMath delimiters if stem attribute != latexmath' do + test 'should passthrough text in stem macro and surround with AsciiMath delimiters if stem attribute is asciimath, empty, or not set' do [ {}, - {'stem' => ''}, - {'stem' => 'asciimath'} + { 'stem' => '' }, + { 'stem' => 'asciimath' }, + { 'stem' => 'bogus' }, ].each do |attributes| input = 'stem:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' - para = block_from_string input, :attributes => attributes + para = block_from_string input, attributes: attributes assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content end end - test 'should passthrough text in stem macro and surround with LaTeX math delimiters if stem attribute = latexmath' do - input = 'stem:[C = \alpha + \beta Y^{\gamma} + \epsilon]' - para = block_from_string input, :attributes => {'stem' => 'latexmath'} - assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content + test 'should passthrough text in stem macro and surround with LaTeX math delimiters if stem attribute is latexmath, latex, or tex' do + [ + { 'stem' => 'latexmath' }, + { 'stem' => 'latex' }, + { 'stem' => 'tex' }, + ].each do |attributes| + input = 'stem:[C = \alpha + \beta Y^{\gamma} + \epsilon]' + para = block_from_string input, attributes: attributes + assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content + end + end + + test 'should apply substitutions specified on stem macro' do + ['stem:c,a[sqrt(x) <=> {solve-for-x}]', 'stem:n,-r[sqrt(x) <=> {solve-for-x}]'].each do |input| + para = block_from_string input, attributes: { 'stem' => 'asciimath', 'solve-for-x' => '13' } + assert_equal '\$sqrt(x) <=> 13\$', para.content + end end - test 'should find and replace placeholder duplicated by substitution' do - input = %q(+first passthrough+ followed by link:$$http://example.com/__u_no_format_me__$$[] with passthrough) - result = render_embedded_string input, :doctype => :inline - assert_equal 'first passthrough followed by http://example.com/__u_no_format_me__ with passthrough', result + test 'should not recognize stem macro with invalid substitution list' do + [',', '42', 'a,'].each do |subs| + input = %(stem:#{subs}[x^2]) + para = block_from_string input, attributes: { 'stem' => 'asciimath' } + assert_equal %(stem:#{subs}[x^2]), para.content + end end end end context 'Replacements' do test 'unescapes XML entities' do - para = block_from_string '< " " " >' - assert_equal '< " " " >', para.apply_normal_subs(para.lines) + para = block_from_string '< " ∴ " " >' + assert_equal '< " ∴ " " >', para.apply_subs(para.source) end test 'replaces arrows' do para = block_from_string '<- -> <= => \<- \-> \<= \=>' - assert_equal '← → ⇐ ⇒ <- -> <= =>', para.apply_normal_subs(para.source) + assert_equal '← → ⇐ ⇒ <- -> <= =>', para.apply_subs(para.source) end test 'replaces dashes' do - para = block_from_string %(-- foo foo--bar foo\\--bar foo -- bar foo \\-- bar -stuff in between --- foo -stuff in between -foo -- -stuff in between -foo --) - expected = ' — foo foo—​bar foo--bar foo — bar foo -- bar -stuff in between — foo -stuff in between -foo — stuff in between -foo — ' + input = <<~'EOS' + -- foo 
foo--bar foo\--bar foo -- bar foo \-- bar + stuff in between + -- foo + stuff in between + foo -- + stuff in between + foo -- + EOS + expected = <<~'EOS'.chop +  — foo foo—​bar foo--bar foo — bar foo -- bar + stuff in between — foo + stuff in between + foo — stuff in between + foo —  + EOS + para = block_from_string input assert_equal expected, para.sub_replacements(para.source) end @@ -1492,7 +2062,7 @@ para = block_from_string %(富--巴) expected = '富—​巴' assert_equal expected, para.sub_replacements(para.source) - end if ::RUBY_MIN_VERSION_1_9 + end test 'replaces marks' do para = block_from_string '(C) (R) (TM) \(C) \(R) \(TM)' @@ -1501,13 +2071,13 @@ test 'preserves entity references' do input = '& © ✔ 😀 • 😀' - result = render_embedded_string input, :doctype => :inline + result = convert_inline_string input assert_equal input, result end test 'only preserves named entities with two or more letters' do input = '& &a; >' - result = render_embedded_string input, :doctype => :inline + result = convert_inline_string input assert_equal '& &a; >', result end @@ -1541,139 +2111,125 @@ %(he is 6' tall), %(`') ] - given.size.times {|i| + given.size.times do |i| para = block_from_string given[i] assert_equal expected[i], para.sub_replacements(para.source) - } + end end end context 'Post replacements' do test 'line break inserted after line with line break character' do para = block_from_string("First line +\nSecond line") - result = para.apply_subs(para.lines, :post_replacements, true) + result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line
    ', result.first end test 'line break inserted after line wrap with hardbreaks enabled' do - para = block_from_string("First line\nSecond line", :attributes => {'hardbreaks' => ''}) - result = para.apply_subs(para.lines, :post_replacements, true) + para = block_from_string("First line\nSecond line", attributes: { 'hardbreaks' => '' }) + result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line
    ', result.first end test 'line break character stripped from end of line with hardbreaks enabled' do - para = block_from_string("First line +\nSecond line", :attributes => {'hardbreaks' => ''}) - result = para.apply_subs(para.lines, :post_replacements, true) + para = block_from_string("First line +\nSecond line", attributes: { 'hardbreaks' => '' }) + result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line
    ', result.first end test 'line break not inserted for single line with hardbreaks enabled' do - para = block_from_string('First line', :attributes => {'hardbreaks' => ''}) - result = para.apply_subs(para.lines, :post_replacements, true) + para = block_from_string('First line', attributes: { 'hardbreaks' => '' }) + result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line', result.first end end context 'Resolve subs' do test 'should resolve subs for block' do - block = Asciidoctor::Block.new(empty_document, :paragraph) + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph block.attributes['subs'] = 'quotes,normal' - block.lock_in_subs + block.commit_subs assert_equal [:quotes, :specialcharacters, :attributes, :replacements, :macros, :post_replacements], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is coderay' do - doc = empty_document :attributes => {'source-highlighter' => 'coderay'} - block = Asciidoctor::Block.new(doc, :listing, :content_model => :verbatim) + doc = empty_document attributes: { 'source-highlighter' => 'coderay' }, parse: true + block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' - block.lock_in_subs + block.commit_subs assert_equal [:highlight], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is pygments' do - doc = empty_document :attributes => {'source-highlighter' => 'pygments'} - block = Asciidoctor::Block.new(doc, :listing, :content_model => :verbatim) + doc = empty_document attributes: { 'source-highlighter' => 'pygments' }, parse: true + block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' - block.lock_in_subs + block.commit_subs assert_equal [:highlight], block.subs - end + end if ENV['PYGMENTS'] - test 'should not resolve specialcharacters sub as highlight for source block when source highlighter is not set' do - doc = empty_document - block = Asciidoctor::Block.new(doc, :listing, :content_model => :verbatim) + test 'should not replace specialcharacters sub with highlight for source block when source highlighter is not set' do + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' - block.lock_in_subs + block.commit_subs assert_equal [:specialcharacters], block.subs end test 'should not use subs if subs option passed to block constructor is nil' do - doc = empty_document - block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => nil, :attributes => {'subs' => 'quotes'} - assert block.subs.empty? - block.lock_in_subs - assert block.subs.empty? 
+ doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: nil, attributes: { 'subs' => 'quotes' } + assert_empty block.subs + block.commit_subs + assert_empty block.subs end test 'should not use subs if subs option passed to block constructor is empty array' do - doc = empty_document - block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => [], :attributes => {'subs' => 'quotes'} - assert block.subs.empty? - block.lock_in_subs - assert block.subs.empty? + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: [], attributes: { 'subs' => 'quotes' } + assert_empty block.subs + block.commit_subs + assert_empty block.subs end test 'should use subs from subs option passed to block constructor' do - doc = empty_document - block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => [:specialcharacters], :attributes => {'subs' => 'quotes'} + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: [:specialcharacters], attributes: { 'subs' => 'quotes' } assert_equal [:specialcharacters], block.subs - block.lock_in_subs + block.commit_subs assert_equal [:specialcharacters], block.subs end test 'should use subs from subs attribute if subs option is not passed to block constructor' do - doc = empty_document - block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :attributes => {'subs' => 'quotes'} - assert block.subs.empty? - # in this case, we have to call lock_in_subs to resolve the subs - block.lock_in_subs + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', attributes: { 'subs' => 'quotes' } + assert_empty block.subs + # in this case, we have to call commit_subs to resolve the subs + block.commit_subs assert_equal [:quotes], block.subs end test 'should use subs from subs attribute if subs option passed to block constructor is :default' do - doc = empty_document - block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => :default, :attributes => {'subs' => 'quotes'} + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: :default, attributes: { 'subs' => 'quotes' } assert_equal [:quotes], block.subs - block.lock_in_subs + block.commit_subs assert_equal [:quotes], block.subs end test 'should use built-in subs if subs option passed to block constructor is :default and subs attribute is absent' do - doc = empty_document - block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => :default + doc = empty_document parse: true + block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: :default assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs - block.lock_in_subs + block.commit_subs assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs end end - - # TODO move to helpers_test.rb - context 'Helpers' do - test 'should URI encode non-word characters generally' do - given = ' /%&?\\' - expect = '%20%2F%25%26%3F%5C' - assert_equal expect, (Asciidoctor::Helpers.encode_uri given) - end - - test 'should not URI select non-word characters' do - given = '-.!~*\';:@=+$,()[]' - expect = given - assert_equal expect, 
(Asciidoctor::Helpers.encode_uri given) - end - end end diff -Nru asciidoctor-1.5.5/test/syntax_highlighter_test.rb asciidoctor-2.0.10/test/syntax_highlighter_test.rb --- asciidoctor-1.5.5/test/syntax_highlighter_test.rb 1970-01-01 00:00:00.000000000 +0000 +++ asciidoctor-2.0.10/test/syntax_highlighter_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -0,0 +1,1089 @@ +# frozen_string_literal: true +require_relative 'test_helper' + +context 'Syntax Highlighter' do + test 'should set syntax_highlighter property on document if source highlighter is set and basebackend is html' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, ruby] + ---- + puts 'Hello, World!' + ---- + EOS + doc = document_from_string input, safe: :safe, parse: true + assert doc.basebackend? 'html' + refute_nil doc.syntax_highlighter + assert_kind_of Asciidoctor::SyntaxHighlighter, doc.syntax_highlighter + end + + test 'should not set syntax_highlighter property on document if source highlighter is set and basebackend is not html' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, ruby] + ---- + puts 'Hello, World!' + ---- + EOS + doc = document_from_string input, safe: :safe, backend: 'docbook', parse: true + refute doc.basebackend? 'html' + assert_nil doc.syntax_highlighter + end + + test 'should not set syntax_highlighter property on document if source highlighter is not set' do + input = <<~'EOS' + [source, ruby] + ---- + puts 'Hello, World!' + ---- + EOS + doc = document_from_string input, safe: :safe, parse: true + assert_nil doc.syntax_highlighter + end + + test 'should not set syntax_highlighter property on document if syntax highlighter cannot be resolved' do + input = <<~'EOS' + :source-highlighter: unknown + + [source, ruby] + ---- + puts 'Hello, World!' + ---- + EOS + doc = document_from_string input, safe: :safe, parse: true + assert_nil doc.syntax_highlighter + end + + test 'should not allow document to enable syntax highlighter if safe mode is at least SERVER' do + input = ':source-highlighter: coderay' + doc = document_from_string input, safe: Asciidoctor::SafeMode::SERVER, parse: true + assert_nil doc.attributes['source-highlighter'] + assert_nil doc.syntax_highlighter + end + + test 'should not invoke highlight method on syntax highlighter if highlight? is false' do + Class.new Asciidoctor::SyntaxHighlighter::Base do + register_for 'unavailable' + + def format node, language, opts + %(
    #{node.content}
    ) + end + + def highlight? + false + end + end + + input = <<~'EOS' + [source,ruby] + ---- + puts 'Hello, World!' + ---- + EOS + + doc = document_from_string input, attributes: { 'source-highlighter' => 'unavailable' } + output = doc.convert + assert_css 'pre.highlight > code.language-ruby', output, 1 + source_block = (doc.find_by {|candidate| candidate.style == 'source' })[0] + assert_raises NotImplementedError do + doc.syntax_highlighter.highlight source_block, source_block.source, (source_block.attr 'language'), {} + end + end + + test 'should be able to register syntax highlighter from syntax highlighter class itself' do + syntax_highlighter = Class.new Asciidoctor::SyntaxHighlighter::Base do + def format node, language, opts + %(
    #{node.content}
    ) + end + + def highlight? + false + end + end + + syntax_highlighter.register_for 'foobar' + assert_equal syntax_highlighter, (Asciidoctor::SyntaxHighlighter.for 'foobar') + end + + test 'should be able to register syntax highlighter using symbol' do + syntax_highlighter = Class.new Asciidoctor::SyntaxHighlighter::Base do + register_for :foobaz + + def format node, language, opts + %(
    #{node.content}
    ) + end + + def highlight? + false + end + end + + assert_equal syntax_highlighter, (Asciidoctor::SyntaxHighlighter.for 'foobaz') + end + + test 'should set language on output of source block when source-highlighter attribute is not set' do + input = <<~'EOS' + [source, ruby] + ---- + puts 'Hello, World!' + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_css 'pre.highlight', output, 1 + assert_css 'pre.highlight > code.language-ruby', output, 1 + assert_css 'pre.highlight > code.language-ruby[data-lang="ruby"]', output, 1 + end + + test 'should set language on output of source block when source-highlighter attribute is not recognized' do + input = <<~'EOS' + :source-highlighter: unknown + + [source, ruby] + ---- + puts 'Hello, World!' + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_css 'pre.highlight', output, 1 + assert_css 'pre.highlight > code.language-ruby', output, 1 + assert_css 'pre.highlight > code.language-ruby[data-lang="ruby"]', output, 1 + end + + test 'should highlight source if source highlighter is set even if language is not set' do + input = <<~'EOS' + :source-highlighter: coderay + + [source] + ---- + [numbers] + one + two + three + ---- + EOS + output = convert_string input, safe: :safe + assert_css 'pre.CodeRay.highlight', output, 1 + assert_includes output, '' + end + + test 'should not crash if source block has no lines and source highlighter is set' do + input = <<~'EOS' + :source-highlighter: coderay + + [source,text] + ---- + ---- + EOS + output = convert_string_to_embedded input, safe: :safe + assert_css 'pre.CodeRay', output, 1 + assert_css 'pre.CodeRay > code', output, 1 + assert_css 'pre.CodeRay > code:empty', output, 1 + end + + test 'should highlight source inside AsciiDoc table cell if source-highlighter attribute is set' do + input = <<~'EOS' + :source-highlighter: coderay + + |=== + a| + [source, ruby] + ---- + require 'coderay' + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table + ---- + |=== + EOS + output = convert_string_to_embedded input, safe: :safe + assert_xpath '/table//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 + end + + test 'should set starting line number in DocBook output if linenums option is enabled and start attribute is set' do + input = <<~'EOS' + [source%linenums,java,start=3] + ---- + public class HelloWorld { + public static void main(String[] args) { + out.println("Hello, World!"); + } + } + ---- + EOS + + output = convert_string_to_embedded input, backend: :docbook, safe: Asciidoctor::SafeMode::SAFE + assert_css 'programlisting[startinglinenumber]', output, 1 + assert_css 'programlisting[startinglinenumber="3"]', output, 1 + end + + test 'should read source language from source-language document attribute if not specified on source block' do + input = <<~'EOS' + :source-highlighter: coderay + :source-language: ruby + + [source] + ---- + require 'coderay' + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table + ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE, linkcss_default: true + assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 + end + + test 'should rename document attribute named language to source-language when compat-mode is enabled' do + input = <<~'EOS' + :language: ruby + + {source-language} + EOS 
+ + assert_equal 'ruby', (convert_inline_string input, attributes: { 'compat-mode' => '' }) + + input = <<~'EOS' + :language: ruby + + {source-language} + EOS + + assert_equal '{source-language}', (convert_inline_string input) + end + + context 'CodeRay' do + test 'should highlight source if source-highlighter attribute is set' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, ruby] + ---- + require 'coderay' + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, linkcss_default: true + assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 + assert_match(/\.CodeRay *\{/, output) + end + + test 'should not fail if source language is invalid' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, n/a] + ---- + PRINT 'yo' + ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_css 'code[data-lang="n/a"]', output, 1 + end + + test 'should number lines if third positional attribute is set' do + input = <<~'EOS' + :source-highlighter: coderay + + [source,ruby,linenums] + ---- + puts 'Hello, World!' + ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_xpath '//td[@class="line-numbers"]', output, 1 + end + + test 'should number lines if linenums option is set on source block' do + input = <<~'EOS' + :source-highlighter: coderay + + [source%linenums,ruby] + ---- + puts 'Hello, World!' + ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_xpath '//td[@class="line-numbers"]', output, 1 + end + + test 'should number lines of source block if source-linenums-option document attribute is set' do + input = <<~'EOS' + :source-highlighter: coderay + :source-linenums-option: + + [source,ruby] + ---- + puts 'Hello, World!' + ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_xpath '//td[@class="line-numbers"]', output, 1 + end + + test 'should set starting line number in HTML output if linenums option is enabled and start attribute is set' do + input = <<~'EOS' + :source-highlighter: coderay + :coderay-linenums-mode: inline + + [source%linenums,ruby,start=10] + ---- + puts 'Hello, World!' 
+ ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_xpath '//span[@class="line-numbers"]', output, 1 + assert_xpath '//span[@class="line-numbers"][text()="10"]', output, 1 + end + + test 'should highlight lines specified in highlight attribute if linenums is set and source-highlighter is coderay' do + %w(highlight="1,4-6" highlight=1;4..6 highlight=1;4..;!7).each do |highlight_attr| + input = <<~EOS + :source-highlighter: coderay + + [source%linenums,java,#{highlight_attr}] + ---- + import static java.lang.System.out; + + public class HelloWorld { + public static void main(String[] args) { + out.println("Hello, World!"); + } + } + ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_css 'strong.highlighted', output, 4 + assert_xpath '//strong[@class="highlighted"][text()="1"]', output, 1 + assert_xpath '//strong[@class="highlighted"][text()="2"]', output, 0 + assert_xpath '//strong[@class="highlighted"][text()="3"]', output, 0 + assert_xpath '//strong[@class="highlighted"][text()="4"]', output, 1 + assert_xpath '//strong[@class="highlighted"][text()="5"]', output, 1 + assert_xpath '//strong[@class="highlighted"][text()="6"]', output, 1 + assert_xpath '//strong[@class="highlighted"][text()="7"]', output, 0 + end + end + + test 'should replace callout marks but not highlight them if source-highlighter attribute is coderay' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, ruby] + ---- + require 'coderay' # <1> + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table # <2> + puts html # <3> <4> + exit 0 # <5><6> + ---- + <1> Load library + <2> Highlight source + <3> Print to stdout + <4> Redirect to a file to capture output + <5> Exit program + <6> Reports success + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_match(/coderay<\/span>.* # \(1\)<\/b>$/, output) + assert_match(/puts 'Hello, world!'<\/span>.* # \(2\)<\/b>$/, output) + assert_match(/puts html.* # \(3\)<\/b> \(4\)<\/b>$/, output) + assert_match(/exit.* # \(5\)<\/b> \(6\)<\/b><\/code>/, output) + end + + test 'should support autonumbered callout marks if source-highlighter attribute is coderay' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, ruby] + ---- + require 'coderay' # <.><.> + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table # <.> + puts html # <.> + ---- + <.> Load library + <.> Gem must be installed + <.> Highlight source + <.> Print to stdout + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_match(/coderay<\/span>.* # \(1\)<\/b> \(2\)<\/b>$/, output) + assert_match(/puts 'Hello, world!'<\/span>.* # \(3\)<\/b>$/, output) + assert_match(/puts html.* # \(4\)<\/b><\/code>/, output) + assert_css '.colist ol', output, 1 + assert_css '.colist ol li', output, 4 + end + + test 'should restore callout marks to correct lines if source highlighter is coderay and table line numbering is enabled' do + input = <<~'EOS' + :source-highlighter: coderay + :coderay-linenums-mode: table + + [source, ruby, numbered] + ---- + require 'coderay' # <1> + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table # <2> + puts html # <3> <4> + exit 0 # <5><6> + ---- + <1> Load library + <2> Highlight source + <3> Print to stdout + <4> Redirect to a file to capture output + <5> Exit program + <6> Reports success + EOS + output = convert_string_to_embedded input, 
safe: Asciidoctor::SafeMode::SAFE + assert_match(/coderay<\/span>.* # \(1\)<\/b>$/, output) + assert_match(/puts 'Hello, world!'<\/span>.* # \(2\)<\/b>$/, output) + assert_match(/puts html.* # \(3\)<\/b> \(4\)<\/b>$/, output) + # NOTE notice there's a newline before the closing
    tag + assert_match(/exit.* # \(5\)<\/b> \(6\)<\/b>\n<\/pre>/, output) + end + + test 'should restore isolated callout mark on last line of source when source highlighter is coderay' do + input = <<~'EOS' + :source-highlighter: coderay + + [source,ruby,linenums] + ---- + require 'app' + + launch_app + # <1> + ---- + <1> Profit. + EOS + + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + # NOTE notice there's a newline before the closing
    tag + assert_match(/\n# \(1\)<\/b>\n<\/pre>/, output) + end + + test 'should preserve space before callout on final line' do + inputs = [] + + inputs << <<~'EOS' + [source,yaml] + ---- + a: 'a' + key: 'value' #<1> + ---- + <1> key-value pair + EOS + + inputs << <<~'EOS' + [source,ruby] + ---- + puts 'hi' + puts 'value' #<1> + ---- + <1> print to stdout + EOS + + inputs << <<~'EOS' + [source,python] + ---- + print 'hi' + print 'value' #<1> + ---- + <1> print to stdout + EOS + + inputs.each do |input| + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'source-highlighter' => 'coderay' } + output = output.gsub(/<\/?span.*?>/, '') + assert_includes output, '\'value\' #(1)' + end + end + + test 'should preserve passthrough placeholders when highlighting source using coderay' do + input = <<~'EOS' + :source-highlighter: coderay + + [source,java] + [subs="specialcharacters,macros,callouts"] + ---- + public class Printer { + public static void main(String[] args) { + System.pass:quotes[_out_].println("*asterisks* make text pass:quotes[*bold*]"); + } + } + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_match(/\.out<\/em>\./, output, 1) + assert_match(/\*asterisks\*/, output, 1) + assert_match(/bold<\/strong>/, output, 1) + refute_includes output, Asciidoctor::Substitutors::PASS_START + end + + test 'should link to CodeRay stylesheet if source-highlighter is coderay and linkcss is set' do + input = <<~'EOS' + :source-highlighter: coderay + + [source, ruby] + ---- + require 'coderay' + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'linkcss' => '' } + assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 + assert_css 'link[rel="stylesheet"][href="./coderay-asciidoctor.css"]', output, 1 + end + + test 'should highlight source inline if source-highlighter attribute is coderay and coderay-css is style' do + input = <<~'EOS' + :source-highlighter: coderay + :coderay-css: style + + [source, ruby] + ---- + require 'coderay' + + html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, linkcss_default: true + assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@style = "color:#036;font-weight:bold"][text() = "CodeRay"]', output, 1 + refute_match(/\.CodeRay \{/, output) + end + + test 'should read stylesheet' do + css = (Asciidoctor::SyntaxHighlighter.for 'coderay').read_stylesheet + refute_nil css + assert_includes css, 'pre.CodeRay{background:#f7f7f8}' + end + end + + context 'Highlight.js' do + test 'should include remote highlight.js assets if source-highlighter attribute is highlight.js' do + input = <<~'EOS' + :source-highlighter: highlight.js + + [source,html] + ---- +

    Highlight me!

    + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_css 'pre.highlightjs.highlight', output, 1 + assert_css 'pre.highlightjs.highlight > code.language-html.hljs[data-lang="html"]', output, 1 + assert_includes output, '<p>Highlight me!</p>' + assert_css '#footer ~ link[href*="highlight.js"]', output, 1 + assert_css '#footer ~ script[src*="highlight.min.js"]', output, 1 + assert_xpath '//script[text()="hljs.initHighlighting()"]', output, 1 + end + + test 'should add language-none class to source block when source-highlighter is highlight.js and language is not set' do + input = <<~'EOS' + :source-highlighter: highlight.js + + [source] + ---- + [numbers] + one + two + three + ---- + EOS + output = convert_string input, safe: :safe + assert_css 'code.language-none', output, 1 + end + + test 'should load additional languages specified by highlightjs-languages' do + input = <<~'EOS' + :source-highlighter: highlight.js + :highlightjs-languages: yaml, scilab + + [source,yaml] + ---- + key: value + ---- + EOS + output = convert_string input, safe: Asciidoctor::SafeMode::SAFE + assert_css '#footer ~ script[src*="languages/yaml.min.js"]', output, 1 + assert_css '#footer ~ script[src*="languages/scilab.min.js"]', output, 1 + end + end + + context 'Prettify' do + test 'should add language classes to child code element when source-highlighter is prettify' do + input = <<~'EOS' + [source,ruby] + ---- + puts "foo" + ---- + EOS + + output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'prettify' } + assert_css 'pre[class="prettyprint highlight"]', output, 1 + assert_css 'pre > code[data-lang="ruby"]', output, 1 + end + + test 'should set linenums start if linenums are enabled and start attribute is set when source-highlighter is prettify' do + input = <<~'EOS' + [source%linenums,ruby,start=5] + ---- + puts "foo" + ---- + EOS + + output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'prettify' } + assert_css 'pre[class="prettyprint highlight linenums:5"]', output, 1 + assert_css 'pre > code[data-lang="ruby"]', output, 1 + end + end + + context 'HTML Pipeline' do + test 'should set lang attribute on pre when source-highlighter is html-pipeline' do + input = <<~'EOS' + [source,ruby] + ---- + filters = [ + HTML::Pipeline::AsciiDocFilter, + HTML::Pipeline::SanitizationFilter, + HTML::Pipeline::SyntaxHighlightFilter + ] + + puts HTML::Pipeline.new(filters, {}).call(input)[:output] + ---- + EOS + + output = convert_string input, attributes: { 'source-highlighter' => 'html-pipeline' } + assert_css 'pre[lang="ruby"]', output, 1 + assert_css 'pre[lang="ruby"] > code', output, 1 + assert_css 'pre[class]', output, 0 + assert_css 'code[class]', output, 0 + end + end + + context 'Rouge' do + test 'should syntax highlight source if source-highlighter attribute is set' do + input = <<~'EOS' + :source-highlighter: rouge + + [source,ruby] + ---- + require 'rouge' + + html = Rouge::Formatters::HTML.new.format(Rouge::Lexers::Ruby.new.lex('puts "Hello, world!"')) + ---- + EOS + output = convert_string input, safe: :safe, linkcss_default: true + assert_xpath '//pre[@class="rouge highlight"]/code[@data-lang="ruby"]/span[@class="no"][text()="Rouge"]', output, 2 + assert_includes output, 'pre.rouge .no {' + end + + test 'should not crash if source-highlighter attribute is set and source block does not define a language' do + input = <<~'EOS' + :source-highlighter: rouge + + [source] + ---- + require 'rouge' + + html = 
Rouge::Formatters::HTML.new.format(Rouge::Lexers::Ruby.new.lex('puts "Hello, world!"')) + ---- + EOS + output = convert_string_to_embedded input, safe: :safe + assert_css 'pre > code:not([data-lang])', output, 1 + end + + test 'should default to plain text lexer if lexer cannot be resolved for language' do + input = <<~'EOS' + :source-highlighter: rouge + + [source,lolcode] + ---- + CAN HAS STDIO? + PLZ OPEN FILE "LOLCATS.TXT"? + KTHXBYE + ---- + EOS + output = convert_string_to_embedded input, safe: :safe + assert_css 'code[data-lang=lolcode]', output, 1 + assert_css 'code span', output, 0 + assert_xpath %(//code[text()='CAN HAS STDIO?\nPLZ OPEN FILE "LOLCATS.TXT"?\nKTHXBYE']), output, 1 + end + + test 'should honor cgi-style options on language' do + input = <<~'EOS' + :source-highlighter: rouge + + [source,"console?prompt=$> "] + ---- + $> asciidoctor --version + ---- + EOS + output = convert_string_to_embedded input, safe: :safe + assert_css 'code[data-lang=console]', output, 1 + assert_css 'code span.gp', output, 1 + end + + test 'should set starting line number to 1 by default in HTML output if linenums option is enabled' do + input = <<~'EOS' + [source%linenums,ruby] + ---- + puts 'Hello, World!' + puts 'Goodbye, World!' + ---- + EOS + output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'rouge' } + assert_css 'table.linenotable', output, 1 + assert_css 'table.linenotable td.linenos', output, 1 + assert_css 'table.linenotable td.linenos pre.lineno', output, 1 + assert_css 'table.linenotable td.code', output, 1 + assert_css 'table.linenotable td.code pre:not([class])', output, 1 + assert_xpath %(//pre[@class="lineno"][text()="1\n2\n"]), output, 1 + end + + test 'should set starting line number in HTML output if linenums option is enabled and start attribute is set' do + input = <<~'EOS' + [source%linenums,ruby,start=9] + ---- + puts 'Hello, World!' + puts 'Goodbye, World!' + ---- + EOS + output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'rouge' } + assert_css 'table.linenotable', output, 1 + assert_css 'table.linenotable td.linenos', output, 1 + assert_css 'table.linenotable td.linenos pre.lineno', output, 1 + assert_css 'table.linenotable td.code', output, 1 + assert_css 'table.linenotable td.code pre:not([class])', output, 1 + assert_xpath %(//pre[@class="lineno"][text()=" 9\n10\n"]), output, 1 + end + + test 'should restore callout marks to correct lines' do + ['', '%linenums'].each do |opts| + input = <<~EOS + :source-highlighter: rouge + + [source#{opts},ruby] + ---- + require 'rouge' # <1> + + html = Rouge::Formatters::HTML.new.format(Rouge::Lexers::Ruby.new.lex('puts "Hello, world!"')) # <2> + puts html # <3> <4> + exit 0 # <5><6> + ---- + <1> Load library + <2> Highlight source + <3> Print to stdout + <4> Redirect to a file to capture output + <5> Exit program + <6> Reports success + EOS + output = convert_string_to_embedded input, safe: :safe + assert_match(/'rouge'<\/span>.* # \(1\)<\/b>$/, output) + assert_match(/'puts "Hello, world!"'<\/span>.* # \(2\)<\/b>$/, output) + assert_match(/html<\/span>.* # \(3\)<\/b> \(4\)<\/b>$/, output) + # NOTE notice there's a newline before the closing tag when linenums are enabled + assert_match(/0<\/span>.* # \(5\)<\/b> \(6\)<\/b>#{opts == '%linenums' ? ?\n : '
    '}<\/pre>/, output) + end + end + + test 'should line highlight specified lines when last line is not highlighted' do + ['', '%linenums'].each do |opts| + input = <<~EOS + :source-highlighter: rouge + + [source#{opts},ruby,highlight=1] + ---- + puts 'Hello, world!' + puts 'Goodbye, world!' + ---- + EOS + # NOTE notice the newline in inside the closing of the highlight span + expected = <<~EOS.chop + puts 'Hello, world!' + puts 'Goodbye, world!'#{opts == '%linenums' ? ?\n : '
    '} + EOS + + output = convert_string_to_embedded input, safe: :safe + assert_includes output, expected + end + end + + test 'should line highlight specified lines when last line is highlighted' do + ['', '%linenums'].each do |opts| + input = <<~EOS + :source-highlighter: rouge + + [source#{opts},ruby,highlight=2] + ---- + puts 'Hello, world!' + puts 'Goodbye, world!' + ---- + EOS + # NOTE notice the newline in inside the closing of the highlight span + expected = <<~EOS.chop + puts 'Hello, world!' + puts 'Goodbye, world!' + #{opts == '%linenums' ? '' : ''} + EOS + + output = convert_string_to_embedded input, safe: :safe + assert_includes output, expected + end + end + + test 'should restore callout marks to correct lines if line numbering and line highlighting are enabled' do + [1, 2].each do |highlight| + input = <<~EOS + :source-highlighter: rouge + + [source%linenums,ruby,highlight=#{highlight}] + ---- + require 'rouge' # <1> + exit 0 # <2> + ---- + <1> Load library + <2> Exit program + EOS + output = convert_string_to_embedded input, safe: :safe + assert_match(/'rouge'<\/span>.* # \(1\)<\/b>$/, output) + # NOTE notice there's a newline before the closing tag + assert_match(/0<\/span>.* # \(2\)<\/b>\n#{highlight == 2 ? '' : ''}<\/pre>/, output) + end + end + + test 'should gracefully fallback to default style if specified style not recognized' do + input = <<~'EOS' + :source-highlighter: rouge + :rouge-style: unknown + + [source,ruby] + ---- + puts 'Hello, world!' + ---- + EOS + output = convert_string input, safe: :safe, linkcss_default: true + assert_css 'pre.rouge', output, 1 + assert_includes output, 'pre.rouge .no {' + assert_match %r/pre\.rouge \{\s*background-color: #f8f8f8;/m, output + end + + test 'should restore isolated callout mark on last line of source' do + input = <<~'EOS' + :source-highlighter: rouge + + [source%linenums,ruby] + ---- + require 'app' + + launch_app + # <1> + ---- + <1> Profit. + EOS + + output = convert_string_to_embedded input, safe: :safe + # NOTE notice there's a newline before the closing tag, but not before the closing tag + assert_match(/\n# \(1\)<\/b>\n<\/pre><\/td>/, output) + end + + test 'should number all lines when isolated callout mark is on last line of source and starting line number is set' do + input = <<~'EOS' + :source-highlighter: rouge + + [source%linenums,ruby,start=5] + ---- + require 'app' + + launch_app + # <1> + ---- + <1> Profit. 
+ EOS + + output = convert_string_to_embedded input, safe: :safe + assert_xpath %(//pre[@class="lineno"][text()="5\n6\n7\n8\n"]), output, 1 + # NOTE notice there's a newline before the closing tag, but not before the closing tag + assert_match(/\n# \(1\)<\/b>\n<\/pre><\/td>/, output) + end + + test 'should read stylesheet for specified style' do + css = (Asciidoctor::SyntaxHighlighter.for 'rouge').read_stylesheet 'monokai' + refute_nil css + assert_includes css, 'pre.rouge {' + assert_includes css, 'background-color: #49483e;' + end + end + + context 'Pygments' do + test 'should syntax highlight source if source-highlighter attribute is set' do + input = <<~'EOS' + :source-highlighter: pygments + :pygments-style: monokai + + [source,python] + ---- + from pygments import highlight + from pygments.lexers import PythonLexer + from pygments.formatters import HtmlFormatter + + source = 'print "Hello World"' + print(highlight(source, PythonLexer(), HtmlFormatter())) + ---- + EOS + output = convert_string input, safe: :safe, linkcss_default: true + assert_xpath '//pre[@class="pygments highlight"]/code[@data-lang="python"]/span[@class="tok-kn"][text()="import"]', output, 3 + assert_includes output, 'pre.pygments ' + end + + test 'should gracefully fallback to default style if specified style not recognized' do + input = <<~'EOS' + :source-highlighter: pygments + :pygments-style: unknown + + [source,python] + ---- + from pygments import highlight + from pygments.lexers import PythonLexer + from pygments.formatters import HtmlFormatter + + source = 'print "Hello World"' + print(highlight(source, PythonLexer(), HtmlFormatter())) + ---- + EOS + output = convert_string input, safe: :safe, linkcss_default: true + assert_css 'pre.pygments', output, 1 + assert_includes output, 'pre.pygments ' + assert_includes output, '.tok-c { color: #408080;' + end + + test 'should number lines if linenums option is set on source block' do + input = <<~'EOS' + :source-highlighter: pygments + + [source%linenums,ruby] + ---- + puts 'Hello, World!' + puts 'Goodbye, World!' 
+ ---- + EOS + output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE + assert_css 'table.linenotable', output, 1 + assert_css 'table.linenotable td.linenos', output, 1 + assert_css 'table.linenotable td.linenos .linenodiv', output, 1 + assert_css 'table.linenotable td.linenos .linenodiv pre:not([class])', output, 1 + assert_css 'table.linenotable td.code', output, 1 + assert_css 'table.linenotable td.code pre:not([class])', output, 1 + assert_xpath %(//*[@class="linenodiv"]/pre[text()="1\n2"]), output, 1 + end + + test 'should restore callout marks to correct lines if table line numbering is enabled' do + input = <<~'EOS' + :source-highlighter: pygments + :pygments-linenums-mode: table + + [source%linenums,ruby] + ---- + from pygments import highlight # <1> + from pygments.lexers import PythonLexer + from pygments.formatters import HtmlFormatter + + code = 'print "Hello World"' + print(highlight(code, PythonLexer(), HtmlFormatter())) # <2><3> + ---- + <1> Load library + <2> Highlight source + <3> Print to stdout + EOS + output = convert_string_to_embedded input, safe: :safe + assert_match(/highlight<\/span> # \(1\)<\/b>$/, output) + # NOTE notice there's a newline before the closing tag + assert_match(/\(\)\)\).*<\/span> # \(2\)<\/b> \(3\)<\/b>$/, output) + end + + test 'should restore isolated callout mark on last line of source' do + input = <<~'EOS' + :source-highlighter: pygments + + [source,ruby,linenums] + ---- + require 'app' + + launch_app + # <1> + ---- + <1> Profit. + EOS + + output = convert_string_to_embedded input, safe: :safe + # NOTE notice there's a newline before the closing tag, but not before the closing tag + assert_match(/\n# \(1\)<\/b>\n<\/pre><\/td>/, output) + end + + test 'should not hardcode inline styles on lineno div and pre elements when linenums are enabled in table mode' do + input = <<~'EOS' + :source-highlighter: pygments + :pygments-css: inline + + [source%linenums,ruby] + ---- + puts 'Hello, World!' + ---- + EOS + + output = convert_string_to_embedded input, safe: :safe + assert_css 'td.linenos', output, 1 + assert_css 'div.linenodiv:not([style])', output, 1 + assert_includes output, '
    '
    +      assert_css 'pre:not([style])', output, 2
    +    end
    +
    +    test 'should not hardcode inline styles on lineno spans when linenums are enabled and source-highlighter is pygments' do
    +      input = <<~'EOS'
    +      :source-highlighter: pygments
    +      :pygments-css: inline
    +      :pygments-linenums-mode: inline
    +
    +      [source%linenums,ruby]
    +      ----
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      puts 'Hello, World!'
    +      exit 0
    +      ----
    +      EOS
    +
    +      output = convert_string_to_embedded input, safe: :safe
    +      assert_css 'table.linenotable', output, 0
    +      assert_css 'pre', output, 1
    +      assert_includes output, ' 1 '
    +      assert_includes output, '10 '
    +    end
    +
    +    test 'should line highlight specified lines' do
    +      input = <<~'EOS'
    +      :source-highlighter: pygments
    +
    +      [source,ruby,highlight=1..2]
    +      ----
    +      puts 'Hello, world!'
    +      puts 'Goodbye, world!'
    +      ----
    +      EOS
    +      # NOTE notice the newline is inside the closing </span> of the highlight span
    +      expected = <<~'EOS'.chop
    +      
    puts 'Hello, world!'
    +      puts 'Goodbye, world!'
    +      
    + EOS + + output = convert_string_to_embedded input, safe: :safe + assert_includes output, expected + end + end if ENV['PYGMENTS'] +end diff -Nru asciidoctor-1.5.5/test/tables_test.rb asciidoctor-2.0.10/test/tables_test.rb --- asciidoctor-1.5.5/test/tables_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/tables_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,73 +1,97 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context 'Tables' do - context 'PSV' do - test 'renders simple psv table' do - input = <<-EOS -|======= -|A |B |C -|a |b |c -|1 |2 |3 -|======= + test 'converts simple psv table' do + input = <<~'EOS' + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= EOS cells = [%w(A B C), %w(a b c), %w(1 2 3)] - doc = document_from_string input, :header_footer => false + doc = document_from_string input, standalone: false table = doc.blocks[0] assert 100, table.columns.map {|col| col.attributes['colpcwidth'] }.reduce(:+) output = doc.convert assert_css 'table', output, 1 - assert_css 'table.tableblock.frame-all.grid-all.spread', output, 1 + assert_css 'table.tableblock.frame-all.grid-all.stretch', output, 1 assert_css 'table > colgroup > col[style*="width: 33.3333%"]', output, 2 assert_css 'table > colgroup > col:last-of-type[style*="width: 33.3334%"]', output, 1 assert_css 'table tr', output, 3 assert_css 'table > tbody > tr', output, 3 assert_css 'table td', output, 9 assert_css 'table > tbody > tr > td.tableblock.halign-left.valign-top > p.tableblock', output, 9 - cells.each_with_index {|row, rowi| + cells.each_with_index do |row, rowi| assert_css "table > tbody > tr:nth-child(#{rowi + 1}) > td", output, row.size assert_css "table > tbody > tr:nth-child(#{rowi + 1}) > td > p", output, row.size - row.each_with_index {|cell, celli| + row.each_with_index do |cell, celli| assert_xpath "(//tr)[#{rowi + 1}]/td[#{celli + 1}]/p[text()='#{cell}']", output, 1 - } - } + end + end end - test 'renders caption on simple psv table' do - input = <<-EOS -.Simple psv table -|======= -|A |B |C -|a |b |c -|1 |2 |3 -|======= + test 'should add direction CSS class if float attribute is set on table' do + input = <<~'EOS' + [float=left] + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= + EOS + + output = convert_string_to_embedded input + assert_css 'table.left', output, 1 + end + + test 'should set stripes class if stripes option is set' do + input = <<~'EOS' + [stripes=odd] + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= + EOS + + output = convert_string_to_embedded input + assert_css 'table.stripes-odd', output, 1 + end + + test 'outputs a caption on simple psv table' do + input = <<~'EOS' + .Simple psv table + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="Table 1. 
Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'only increments table counter for tables that have a title' do - input = <<-EOS -.First numbered table -|======= -|1 |2 |3 -|======= - -|======= -|4 |5 |6 -|======= - -.Second numbered table -|======= -|7 |8 |9 -|======= + input = <<~'EOS' + .First numbered table + |======= + |1 |2 |3 + |======= + + |======= + |4 |5 |6 + |======= + + .Second numbered table + |======= + |7 |8 |9 + |======= EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table:root', output, 3 assert_xpath '(/table)[1]/caption', output, 1 assert_xpath '(/table)[1]/caption[text()="Table 1. First numbered table"]', output, 1 @@ -76,28 +100,74 @@ assert_xpath '(/table)[3]/caption[text()="Table 2. Second numbered table"]', output, 1 end - test 'renders explicit caption on simple psv table' do - input = <<-EOS -[caption="All the Data. "] -.Simple psv table -|======= -|A |B |C -|a |b |c -|1 |2 |3 -|======= + test 'uses explicit caption in front of title in place of default caption and number' do + input = <<~'EOS' + [caption="All the Data. "] + .Simple psv table + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="All the Data. Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end + test 'disables caption when caption attribute on table is empty' do + input = <<~'EOS' + [caption=] + .Simple psv table + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= + EOS + output = convert_string_to_embedded input + assert_xpath '/table/caption[@class="title"][text()="Simple psv table"]', output, 1 + assert_xpath '/table/caption/following-sibling::colgroup', output, 1 + end + + test 'disables caption when caption attribute on table is empty string' do + input = <<~'EOS' + [caption=""] + .Simple psv table + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= + EOS + output = convert_string_to_embedded input + assert_xpath '/table/caption[@class="title"][text()="Simple psv table"]', output, 1 + assert_xpath '/table/caption/following-sibling::colgroup', output, 1 + end + + test 'disables caption on table when table-caption document attribute is unset' do + input = <<~'EOS' + :!table-caption: + + .Simple psv table + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= + EOS + output = convert_string_to_embedded input + assert_xpath '/table/caption[@class="title"][text()="Simple psv table"]', output, 1 + assert_xpath '/table/caption/following-sibling::colgroup', output, 1 + end + test 'ignores escaped separators' do - input = <<-EOS -|=== -|A \\| here| a \\| there -|=== + input = <<~'EOS' + |=== + |A \| here| a \| there + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 1 @@ -107,15 +177,15 @@ end test 'preserves escaped delimiters at the end of the line' do - input = <<-EOS -[%header,cols="1,1"] -|==== -|A |B\\| -|A1 |B1\\| -|A2 |B2\\| -|==== + input = <<~'EOS' + [%header,cols="1,1"] + |=== + |A |B\| + |A1 |B1\| + |A2 |B2\| + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead > tr', output, 1 @@ 
-129,14 +199,14 @@ end test 'should treat trailing pipe as an empty cell' do - input = <<-EOS -|==== -|A1 | -|B1 |B2 -|C1 |C2 -|==== + input = <<~'EOS' + |=== + |A1 | + |B1 |B2 + |C1 |C2 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 @@ -147,75 +217,219 @@ end test 'should auto recover with warning if missing leading separator on first cell' do - input = <<-EOS -|=== -A | here| a | there -|=== - EOS - output = render_embedded_string input - assert_css 'table', output, 1 - assert_css 'table > colgroup > col', output, 4 - assert_css 'table > tbody > tr', output, 1 - assert_css 'table > tbody > tr > td', output, 4 - assert_xpath '/table/tbody/tr/td[1]/p[text()="A"]', output, 1 - assert_xpath '/table/tbody/tr/td[2]/p[text()="here"]', output, 1 - assert_xpath '/table/tbody/tr/td[3]/p[text()="a"]', output, 1 - assert_xpath '/table/tbody/tr/td[4]/p[text()="there"]', output, 1 + input = <<~'EOS' + |=== + A | here| a | there + | x + | y + | z + | end + |=== + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > tbody > tr', output, 2 + assert_css 'table > tbody > tr > td', output, 8 + assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A"]', output, 1 + assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="here"]', output, 1 + assert_xpath '/table/tbody/tr[1]/td[3]/p[text()="a"]', output, 1 + assert_xpath '/table/tbody/tr[1]/td[4]/p[text()="there"]', output, 1 + assert_message logger, :ERROR, ': line 2: table missing leading separator; recovering automatically', Hash + end end test 'performs normal substitutions on cell content' do - input = <<-EOS -:show_title: Cool new show -|=== -|{show_title} |Coming soon... -|=== + input = <<~'EOS' + :show_title: Cool new show + |=== + |{show_title} |Coming soon... + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//tbody/tr/td[1]/p[text()="Cool new show"]', output, 1 - assert_xpath %(//tbody/tr/td[2]/p[text()='Coming soon#{expand_entity 8230}#{expand_entity 8203}']), output, 1 + assert_xpath %(//tbody/tr/td[2]/p[text()='Coming soon#{decode_char 8230}#{decode_char 8203}']), output, 1 end - test 'table and col width not assigned when autowidth option is specified' do - input = <<-EOS -[options="autowidth"] -|======= -|A |B |C -|a |b |c -|1 |2 |3 -|======= + test 'should only substitute specialchars for literal table cells' do + input = <<~'EOS' + |=== + l|one + *two* + three + + |=== + EOS + output = convert_string_to_embedded input + result = xmlnodes_at_xpath('/table//pre', output, 1) + assert_equal %(
    one\n*two*\nthree\n<four>
    ), result.to_s + end + + test 'should preserving leading spaces but not leading newlines or trailing spaces in literal table cells' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [cols=2*] + |=== + l| + one + two + three + + | normal + |=== + EOS + output = convert_string_to_embedded input + result = xmlnodes_at_xpath('/table//pre', output, 1) + assert_equal %(
      one\n  two\nthree
    ), result.to_s + end + + test 'should ignore v table cell style' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + [cols=2*] + |=== + v| + one + two + three + + | normal + |=== + EOS + output = convert_string_to_embedded input + result = xmlnodes_at_xpath('/table//p[@class="tableblock"]', output, 1) + assert_equal %(

    one\n two\nthree

    ), result.to_s + end + + test 'table and column width not assigned when autowidth option is specified' do + input = <<~'EOS' + [options="autowidth"] + |======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 + assert_css 'table.fit-content', output, 1 assert_css 'table[style*="width"]', output, 0 assert_css 'table colgroup col', output, 3 - assert_css 'table colgroup col[width]', output, 0 + assert_css 'table colgroup col[style*="width"]', output, 0 + end + + test 'does not assign column width for autowidth columns in HTML output' do + input = <<~'EOS' + [cols="15%,3*~"] + |======= + |A |B |C |D + |a |b |c |d + |1 |2 |3 |4 + |======= + EOS + doc = document_from_string input + table_row0 = doc.blocks[0].rows.body[0] + assert_equal 15, table_row0[0].attributes['width'] + assert_equal 15, table_row0[0].attributes['colpcwidth'] + refute_equal '', table_row0[0].attributes['autowidth-option'] + expected_pcwidths = { 1 => 28.3333, 2 => 28.3333, 3 => 28.3334 } + (1..3).each do |i| + assert_equal 28.3333, table_row0[i].attributes['width'] + assert_equal expected_pcwidths[i], table_row0[i].attributes['colpcwidth'] + assert_equal '', table_row0[i].attributes['autowidth-option'] + end + output = doc.convert standalone: false + assert_css 'table', output, 1 + assert_css 'table colgroup col', output, 4 + assert_css 'table colgroup col[style]', output, 1 + assert_css 'table colgroup col[style*="width: 15%"]', output, 1 + end + + test 'can assign autowidth to all columns even when table has a width' do + input = <<~'EOS' + [cols="4*~",width=50%] + |======= + |A |B |C |D + |a |b |c |d + |1 |2 |3 |4 + |======= + EOS + doc = document_from_string input + table_row0 = doc.blocks[0].rows.body[0] + (0..3).each do |i| + assert_equal 25, table_row0[i].attributes['width'] + assert_equal 25, table_row0[i].attributes['colpcwidth'] + assert_equal '', table_row0[i].attributes['autowidth-option'] + end + output = doc.convert standalone: false + assert_css 'table', output, 1 + assert_css 'table[style*="width: 50%;"]', output, 1 + assert_css 'table colgroup col', output, 4 + assert_css 'table colgroup col[style]', output, 0 + end + + test 'equally distributes remaining column width to autowidth columns in DocBook output' do + input = <<~'EOS' + [cols="15%,3*~"] + |======= + |A |B |C |D + |a |b |c |d + |1 |2 |3 |4 + |======= + EOS + output = convert_string_to_embedded input, backend: 'docbook5' + assert_css 'tgroup[cols="4"]', output, 1 + assert_css 'tgroup colspec', output, 4 + assert_css 'tgroup colspec[colwidth]', output, 4 + assert_css 'tgroup colspec[colwidth="15*"]', output, 1 + assert_css 'tgroup colspec[colwidth="28.3333*"]', output, 2 + assert_css 'tgroup colspec[colwidth="28.3334*"]', output, 1 + end + + test 'should compute column widths based on pagewidth when width is set on table in DocBook output' do + input = <<~'EOS' + :pagewidth: 500 + + [width=50%] + |======= + |A |B |C |D + + |a |b |c |d + |1 |2 |3 |4 + |======= + EOS + output = convert_string_to_embedded input, backend: 'docbook5' + assert_css 'tgroup[cols="4"]', output, 1 + assert_css 'tgroup colspec', output, 4 + assert_css 'tgroup colspec[colwidth]', output, 4 + assert_css 'tgroup colspec[colwidth="62.5*"]', output, 4 end test 'explicit table width is used even when autowidth option is specified' do - input = <<-EOS -[%autowidth,width=75%] -|======= -|A |B |C -|a |b |c -|1 |2 |3 -|======= + input = <<~'EOS' + [%autowidth,width=75%] + 
|======= + |A |B |C + |a |b |c + |1 |2 |3 + |======= EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 1 assert_css 'table colgroup col', output, 3 - assert_css 'table colgroup col[width]', output, 0 + assert_css 'table colgroup col[style*="width"]', output, 0 end test 'first row sets number of columns when not specified' do - input = <<-EOS -|==== -|first |second |third |fourth -|1 |2 |3 -|4 -|==== + input = <<~'EOS' + |=== + |first |second |third |fourth + |1 |2 |3 + |4 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 4 assert_css 'table > tbody > tr', output, 2 @@ -224,67 +438,84 @@ end test 'colspec attribute using asterisk syntax sets number of columns' do - input = <<-EOS -[cols="3*"] -|=== -|A |B |C |a |b |c |1 |2 |3 -|=== + input = <<~'EOS' + [cols="3*"] + |=== + |A |B |C |a |b |c |1 |2 |3 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'table with explicit column count can have multiple rows on a single line' do - input = <<-EOS -[cols="3*"] -|=== -|one |two -|1 |2 |a |b -|=== + input = <<~'EOS' + [cols="3*"] + |=== + |one |two + |1 |2 |a |b + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 end test 'table with explicit deprecated colspec syntax can have multiple rows on a single line' do - input = <<-EOS -[cols="3"] -|=== -|one |two -|1 |2 |a |b -|=== + input = <<~'EOS' + [cols="3"] + |=== + |one |two + |1 |2 |a |b + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 end test 'columns are added for empty records in colspec attribute' do - input = <<-EOS -[cols="<,"] -|=== -|one |two -|1 |2 |a |b -|=== + input = <<~'EOS' + [cols="<,"] + |=== + |one |two + |1 |2 |a |b + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 end + test 'cols may be separated by semi-colon instead of comma' do + input = <<~'EOS' + [cols="1s;3m"] + |=== + | strong + | mono + |=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'col[style="width: 25%;"]', output, 1 + assert_css 'col[style="width: 75%;"]', output, 1 + assert_xpath '(//td)[1]//strong', output, 1 + assert_xpath '(//td)[2]//code', output, 1 + end + test 'cols attribute may include spaces' do - input = <<-EOS -[cols=" 1, 1 "] -|=== -|one |two |1 |2 |a |b -|=== + input = <<~'EOS' + [cols=" 1, 1 "] + |=== + |one |two |1 |2 |a |b + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 @@ -292,14 +523,14 @@ end test 'blank cols attribute should be ignored' do - input = <<-EOS -[cols=" "] -|=== -|one |two -|1 |2 |a |b -|=== + input = <<~'EOS' + [cols=" "] + 
|=== + |one |two + |1 |2 |a |b + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 @@ -307,14 +538,14 @@ end test 'empty cols attribute should be ignored' do - input = <<-EOS -[cols=""] -|=== -|one |two -|1 |2 |a |b -|=== + input = <<~'EOS' + [cols=""] + |=== + |one |two + |1 |2 |a |b + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 @@ -322,17 +553,17 @@ end test 'table with header and footer' do - input = <<-EOS -[frame="topbot",options="header,footer"] -|=== -|Item |Quantity -|Item 1 |1 -|Item 2 |2 -|Item 3 |3 -|Total |6 -|=== + input = <<~'EOS' + [frame="topbot",options="header,footer"] + |=== + |Item |Quantity + |Item 1 |1 + |Item 2 |2 + |Item 3 |3 + |Total |6 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 @@ -343,21 +574,23 @@ assert_css 'table > tfoot > tr > td', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 + table_section_names = (xmlnodes_at_css 'table > *', output).map(&:node_name).select {|n| n.start_with? 't' } + assert_equal %w(thead tbody tfoot), table_section_names end test 'table with header and footer docbook' do - input = <<-EOS -.Table with header, body and footer -[frame="topbot",options="header,footer"] -|=== -|Item |Quantity -|Item 1 |1 -|Item 2 |2 -|Item 3 |3 -|Total |6 -|=== + input = <<~'EOS' + .Table with header, body and footer + [frame="topbot",options="header,footer"] + |=== + |Item |Quantity + |Item 1 |1 + |Item 2 |2 + |Item 3 |3 + |Total |6 + |=== EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_css 'table', output, 1 assert_css 'table[frame="topbot"]', output, 1 assert_css 'table > title', output, 1 @@ -376,36 +609,49 @@ assert_css 'table > tgroup > tbody', output, 1 assert_css 'table > tgroup > tbody > row', output, 3 assert_css 'table > tgroup > tbody > row', output, 3 + table_section_names = (xmlnodes_at_css 'table > tgroup > *', output).map(&:node_name).select {|n| n.start_with? 
't' } + assert_equal %w(thead tbody tfoot), table_section_names + end + + test 'should recognize ends as an alias to topbot for frame when converting to DocBook' do + input = <<~'EOS' + [frame=ends] + |=== + |A |B |C + |=== + EOS + output = convert_string_to_embedded input, backend: 'docbook' + assert_css 'informaltable[frame="topbot"]', output, 1 end test 'table with landscape orientation in DocBook' do ['orientation=landscape', '%rotate'].each do |attrs| - input = <<-EOS -[#{attrs}] -|=== -|Column A | Column B | Column C -|=== + input = <<~EOS + [#{attrs}] + |=== + |Column A | Column B | Column C + |=== EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable[orient="land"]', output, 1 end end test 'table with implicit header row' do - input = <<-EOS -|=== -|Column 1 |Column 2 - -|Data A1 -|Data B1 - -|Data A2 -|Data B2 -|=== + input = <<~'EOS' + |=== + |Column 1 |Column 2 + + |Data A1 + |Data B1 + + |Data A2 + |Data B2 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 @@ -415,17 +661,33 @@ assert_css 'table > tbody > tr', output, 2 end + test 'table with implicit header row only' do + input = <<~'EOS' + |=== + |Column 1 |Column 2 + + |=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'table > thead', output, 1 + assert_css 'table > thead > tr', output, 1 + assert_css 'table > thead > tr > th', output, 2 + assert_css 'table > tbody', output, 0 + end + test 'table with implicit header row when other options set' do - input = <<-EOS -[%autowidth] -|=== -|Column 1 |Column 2 - -|Data A1 -|Data B1 -|=== + input = <<~'EOS' + [%autowidth] + |=== + |Column 1 |Column 2 + + |Data A1 + |Data B1 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 0 assert_css 'table > colgroup > col', output, 2 @@ -437,17 +699,17 @@ end test 'no implicit header row if second line not blank' do - input = <<-EOS -|=== -|Column 1 |Column 2 -|Data A1 -|Data B1 - -|Data A2 -|Data B2 -|=== + input = <<~'EOS' + |=== + |Column 1 |Column 2 + |Data A1 + |Data B1 + + |Data A2 + |Data B2 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 @@ -455,21 +717,68 @@ assert_css 'table > tbody > tr', output, 3 end + test 'no implicit header row if cell in first line spans multiple lines' do + input = <<~'EOS' + [cols=2*] + |=== + |A1 + + + A1 continued|B1 + + |A2 + |B2 + |=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'table > thead', output, 0 + assert_css 'table > tbody', output, 1 + assert_css 'table > tbody > tr', output, 2 + assert_xpath '(//td)[1]/p', output, 2 + end + + test 'no implicit header row if AsciiDoc cell in first line spans multiple lines' do + input = <<~'EOS' + [cols=2*] + |=== + a|contains AsciiDoc content + + * a + * b + * c + a|contains no AsciiDoc content + + just text + |A2 + |B2 + |=== + EOS + output = convert_string_to_embedded input + assert_css 
'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'table > thead', output, 0 + assert_css 'table > tbody', output, 1 + assert_css 'table > tbody > tr', output, 2 + assert_xpath '(//td)[1]//ul', output, 1 + end + test 'no implicit header row if first line blank' do - input = <<-EOS -|=== + input = <<~'EOS' + |=== -|Column 1 |Column 2 + |Column 1 |Column 2 -|Data A1 -|Data B1 + |Data A1 + |Data B1 -|Data A2 -|Data B2 + |Data A2 + |Data B2 -|=== + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 @@ -478,19 +787,19 @@ end test 'no implicit header row if noheader option is specified' do - input = <<-EOS -[%noheader] -|=== -|Column 1 |Column 2 - -|Data A1 -|Data B1 - -|Data A2 -|Data B2 -|=== + input = <<~'EOS' + [%noheader] + |=== + |Column 1 |Column 2 + + |Data A1 + |Data B1 + + |Data A2 + |Data B2 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 @@ -499,15 +808,15 @@ end test 'styles not applied to header cells' do - input = <<-EOS -[cols="1h,1s,1e",options="header,footer"] -|==== -|Name |Occupation| Website -|Octocat |Social coding| http://github.com -|Name |Occupation| Website -|==== + input = <<~'EOS' + [cols="1h,1s,1e",options="header,footer"] + |=== + |Name |Occupation| Website + |Octocat |Social coding| https://github.com + |Name |Occupation| Website + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > thead > tr > th', output, 3 assert_css 'table > thead > tr > th > *', output, 0 @@ -525,19 +834,19 @@ end test 'vertical table headers use th element instead of header class' do - input = <<-EOS -[cols="1h,1s,1e"] -|==== + input = <<~'EOS' + [cols="1h,1s,1e"] + |=== -|Name |Occupation| Website + |Name |Occupation| Website -|Octocat |Social coding| http://github.com + |Octocat |Social coding| https://github.com -|Name |Occupation| Website + |Name |Occupation| Website -|==== + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > tbody > tr > th', output, 3 assert_css 'table > tbody > tr > td', output, 6 @@ -548,28 +857,28 @@ end test 'supports horizontal and vertical source data with blank lines and table header' do - input = <<-EOS -.Horizontal and vertical source data -[width="80%",cols="3,^2,^2,10",options="header"] -|=== -|Date |Duration |Avg HR |Notes + input = <<~'EOS' + .Horizontal and vertical source data + [width="80%",cols="3,^2,^2,10",options="header"] + |=== + |Date |Duration |Avg HR |Notes -|22-Aug-08 |10:24 | 157 | -Worked out MSHR (max sustainable heart rate) by going hard -for this interval. + |22-Aug-08 |10:24 | 157 | + Worked out MSHR (max sustainable heart rate) by going hard + for this interval. -|22-Aug-08 |23:03 | 152 | -Back-to-back with previous interval. + |22-Aug-08 |23:03 | 152 | + Back-to-back with previous interval. -|24-Aug-08 |40:00 | 145 | -Moderately hard interspersed with 3x 3min intervals (2 min -hard + 1 min really hard taking the HR up to 160). + |24-Aug-08 |40:00 | 145 | + Moderately hard interspersed with 3x 3min intervals (2 min + hard + 1 min really hard taking the HR up to 160). -I am getting in shape! + I am getting in shape! 
-|=== + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table[style*="width: 80%"]', output, 1 assert_xpath '/table/caption[@class="title"][text()="Table 1. Horizontal and vertical source data"]', output, 1 @@ -591,30 +900,30 @@ end test 'percentages as column widths' do - input = <<-EOS -[cols="<.^10%,<90%"] -|=== -|column A |column B -|=== + input = <<~'EOS' + [cols="<.^10%,<90%"] + |=== + |column A |column B + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '/table/colgroup/col', output, 2 assert_xpath '(/table/colgroup/col)[1][@style="width: 10%;"]', output, 1 assert_xpath '(/table/colgroup/col)[2][@style="width: 90%;"]', output, 1 end test 'spans, alignments and styles' do - input = <<-EOS -[cols="e,m,^,>s",width="25%"] -|=== -|1 >s|2 |3 |4 -^|5 2.2+^.^|6 .3+<.>m|7 -^|8 -d|9 2+>|10 -|=== + input = <<~'EOS' + [cols="e,m,^,>s",width="25%"] + |=== + |1 >s|2 |3 |4 + ^|5 2.2+^.^|6 .3+<.>m|7 + ^|8 + d|9 2+>|10 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 25%"]', output, 4 assert_css 'table > tbody > tr', output, 4 @@ -642,14 +951,14 @@ end test 'sets up columns correctly if first row has cell that spans columns' do - input = <<-EOS -|=== -2+^|AAA |CCC -|AAA |BBB |CCC -|AAA |BBB |CCC -|=== + input = <<~'EOS' + |=== + 2+^|AAA |CCC + |AAA |BBB |CCC + |AAA |BBB |CCC + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1)[colspan="2"]', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2):not([colspan])', output, 1 @@ -658,14 +967,14 @@ end test 'supports repeating cells' do - input = <<-EOS -|=== -3*|A -|1 3*|2 -|b |c -|=== + input = <<~'EOS' + |=== + 3*|A + |1 3*|2 + |b |c + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 3 @@ -687,14 +996,14 @@ end test 'calculates colnames correctly when using implicit column count and single cell with colspan' do - input = <<-EOS -|=== -2+|Two Columns -|One Column |One Column -|=== + input = <<~'EOS' + |=== + 2+|Two Columns + |One Column |One Column + |=== EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//colspec', output, 2 assert_xpath '(//colspec)[1][@colname="col_1"]', output, 1 assert_xpath '(//colspec)[2][@colname="col_2"]', output, 1 @@ -704,14 +1013,14 @@ end test 'calculates colnames correctly when using implicit column count and cells with mixed colspans' do - input = <<-EOS -|=== -2+|Two Columns | One Column -|One Column |One Column |One Column -|=== + input = <<~'EOS' + |=== + 2+|Two Columns | One Column + |One Column |One Column |One Column + |=== EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//colspec', output, 3 assert_xpath '(//colspec)[1][@colname="col_1"]', output, 1 assert_xpath '(//colspec)[2][@colname="col_2"]', output, 1 @@ -724,16 +1033,16 @@ end test 'assigns unique column names for table with implicit column count and colspans in 
first row' do - input = <<-EOS -|==== -| 2+| Node 0 2+| Node 1 + input = <<~'EOS' + |=== + | 2+| Node 0 2+| Node 1 -| Host processes | Core 0 | Core 1 | Core 4 | Core 5 -| Guest processes | Core 2 | Core 3 | Core 6 | Core 7 -|==== + | Host processes | Core 0 | Core 1 | Core 4 | Core 5 + | Guest processes | Core 2 | Core 3 | Core 6 | Core 7 + |=== EOS - output = render_embedded_string input, :backend => 'docbook' + output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//colspec', output, 5 (1..5).each do |n| assert_xpath %((//colspec)[#{n}][@colname="col_#{n}"]), output, 1 @@ -746,239 +1055,376 @@ end test 'ignores cell with colspan that exceeds colspec' do - input = <<-EOS -[cols="1,1"] -|=== -3+|A -|B -a|C - -more C -|=== - EOS - output = render_embedded_string input - assert_css 'table', output, 1 - assert_css 'table *', output, 0 - end - - test 'paragraph, verse and literal content' do - input = <<-EOS -[cols=",^v,^l",options="header"] -|=== -|Paragraphs |Verse |Literal -3*|The discussion about what is good, -what is beautiful, what is noble, -what is pure, and what is true -could always go on. - -Why is that important? -Why would I like to do that? - -Because that's the only conversation worth having. - -And whether it goes on or not after I die, I don't know. -But, I do know that it is the conversation I want to have while I am still alive. - -Which means that to me the offer of certainty, -the offer of complete security, -the offer of an impermeable faith that can't give way -is an offer of something not worth having. - -I want to live my life taking the risk all the time -that I don't know anything like enough yet... -that I haven't understood enough... -that I can't know enough... -that I am always hungrily operating on the margins -of a potentially great harvest of future knowledge and wisdom. + input = <<~'EOS' + [cols=2*] + |=== + 3+|A + |B + a|C + + more C + |=== + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table *', output, 0 + assert_message logger, :ERROR, ': line 5: dropping cell because it exceeds specified number of columns', Hash + end + end -I wouldn't have it any other way. -|=== + test 'paragraph and literal repeated content' do + input = <<~'EOS' + [cols=",^l"] + |=== + |Paragraphs |Literal + + 3*|The discussion about what is good, + what is beautiful, what is noble, + what is pure, and what is true + could always go on. + + Why is that important? + Why would I like to do that? + + Because that's the only conversation worth having. + + And whether it goes on or not after I die, I don't know. + But, I do know that it is the conversation I want to have while I am still alive. + + Which means that to me the offer of certainty, + the offer of complete security, + the offer of an impermeable faith that can't give way + is an offer of something not worth having. + + I want to live my life taking the risk all the time + that I don't know anything like enough yet... + that I haven't understood enough... + that I can't know enough... + that I am always hungrily operating on the margins + of a potentially great harvest of future knowledge and wisdom. + + I wouldn't have it any other way. 
+ |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 - assert_css 'table > colgroup > col', output, 3 + assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 - assert_css 'table > thead > tr > th', output, 3 + assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 1 - assert_css 'table > tbody > tr > td', output, 3 + assert_css 'table > tbody > tr > td', output, 2 assert_css 'table > tbody > tr > td:nth-child(1).halign-left.valign-top > p.tableblock', output, 7 - assert_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.verse', output, 1 - verse = xmlnodes_at_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.verse', output, 1 - assert_equal 26, verse.text.lines.entries.size - assert_css 'table > tbody > tr > td:nth-child(3).halign-center.valign-top > div.literal > pre', output, 1 - literal = xmlnodes_at_css 'table > tbody > tr > td:nth-child(3).halign-center.valign-top > div.literal > pre', output, 1 - assert_equal 26, literal.text.lines.entries.size - end - - test 'basic asciidoc cell' do - input = <<-EOS -|=== -a|-- -NOTE: content - -content --- -|=== + assert_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.literal > pre', output, 1 + literal = xmlnodes_at_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.literal > pre', output, 1 + assert_equal 26, literal.text.lines.size + end + + test 'should not split paragraph at line containing only {blank} that is directly adjacent to non-blank lines' do + input = <<~'EOS' + |=== + |paragraph + {blank} + still one paragraph + {blank} + still one paragraph + |=== EOS - result = render_embedded_string input - assert_css 'table.tableblock', result, 1 - assert_css 'table.tableblock td.tableblock', result, 1 - assert_css 'table.tableblock td.tableblock .openblock', result, 1 - assert_css 'table.tableblock td.tableblock .openblock .admonitionblock', result, 1 - assert_css 'table.tableblock td.tableblock .openblock .paragraph', result, 1 + result = convert_string_to_embedded input + assert_css 'p.tableblock', result, 1 end - test 'doctype can be set in asciidoc table cell' do - input = <<-EOS -|=== -a| -:doctype: inline + test 'should strip trailing newlines when splitting paragraphs' do + input = <<~'EOS' + |=== + |first wrapped + paragraph -content -|=== + second paragraph + + third paragraph + |=== EOS - result = render_embedded_string input - assert_css 'table.tableblock', result, 1 - assert_css 'table.tableblock .paragraph', result, 0 + result = convert_string_to_embedded input + assert_xpath %((//p[@class="tableblock"])[1][text()="first wrapped\nparagraph"]), result, 1 + assert_xpath %((//p[@class="tableblock"])[2][text()="second paragraph"]), result, 1 + assert_xpath %((//p[@class="tableblock"])[3][text()="third paragraph"]), result, 1 end - test 'compat mode can be activated in asciidoc table cell' do - input = <<-EOS -|=== -a| -:compat-mode: + test 'basic AsciiDoc cell' do + input = <<~'EOS' + |=== + a|-- + NOTE: content -'italic' -|=== + content + -- + |=== EOS - result = render_embedded_string input - assert_css 'table.tableblock td em', result, 1 + result = convert_string_to_embedded input + assert_css 'table.tableblock', result, 1 + assert_css 'table.tableblock td.tableblock', result, 1 + assert_css 'table.tableblock td.tableblock 
.openblock', result, 1 + assert_css 'table.tableblock td.tableblock .openblock .admonitionblock', result, 1 + assert_css 'table.tableblock td.tableblock .openblock .paragraph', result, 1 end - test 'asciidoc content' do - input = <<-EOS -[cols="1e,1,5a",frame="topbot",options="header"] -|=== -|Name |Backends |Description + test 'AsciiDoc table cell should be wrapped in div with class "content"' do + input = <<~'EOS' + |=== + a|AsciiDoc table cell + |=== + EOS -|badges |xhtml11, html5 | -Link badges ('XHTML 1.1' and 'CSS') in document footers. + result = convert_string_to_embedded input + assert_css 'table.tableblock td.tableblock > div.content', result, 1 + assert_css 'table.tableblock td.tableblock > div.content > div.paragraph', result, 1 + end -NOTE: The path names of images, icons and scripts are relative path -names to the output document not the source document. + test 'doctype can be set in AsciiDoc table cell' do + input = <<~'EOS' + |=== + a| + :doctype: inline -|[[X97]] docinfo, docinfo1, docinfo2 |All backends | -These three attributes control which document information -files will be included in the the header of the output file: + content + |=== + EOS -docinfo:: Include `-docinfo.` -docinfo1:: Include `docinfo.` -docinfo2:: Include `docinfo.` and `-docinfo.` + result = convert_string_to_embedded input + assert_css 'table.tableblock', result, 1 + assert_css 'table.tableblock .paragraph', result, 0 + end -Where `` is the file name (sans extension) of the AsciiDoc -input file and `` is `.html` for HTML outputs or `.xml` for -DocBook outputs. If the input file is the standard input then the -output file name is used. -|=== + test 'should reset doctype to default in AsciiDoc table cell' do + input = <<~'EOS' + = Book Title + :doctype: book + + == Chapter 1 + + |=== + a| + = AsciiDoc Table Cell + + doctype={doctype} + {backend-html5-doctype-article} + {backend-html5-doctype-book} + |=== + EOS + + result = convert_string_to_embedded input, attributes: { 'attribute-missing' => 'skip' } + assert_includes result, 'doctype=article' + refute_includes result, '{backend-html5-doctype-article}' + assert_includes result, '{backend-html5-doctype-book}' + end + + test 'should update doctype-related attributes in AsciiDoc table cell when doctype is set' do + input = <<~'EOS' + = Document Title + :doctype: article + + == Chapter 1 + + |=== + a| + = AsciiDoc Table Cell + :doctype: book + + doctype={doctype} + {backend-html5-doctype-book} + {backend-html5-doctype-article} + |=== + EOS + + result = convert_string_to_embedded input, attributes: { 'attribute-missing' => 'skip' } + assert_includes result, 'doctype=book' + refute_includes result, '{backend-html5-doctype-book}' + assert_includes result, '{backend-html5-doctype-article}' + end + + test 'AsciiDoc content' do + input = <<~'EOS' + [cols="1e,1,5a",frame="topbot",options="header"] + |=== + |Name |Backends |Description + + |badges |xhtml11, html5 | + Link badges ('XHTML 1.1' and 'CSS') in document footers. + + [NOTE] + ==== + The path names of images, icons and scripts are relative path + names to the output document not the source document. 
+ ==== + |[[X97]] docinfo, docinfo1, docinfo2 |All backends | + These three attributes control which document information + files will be included in the the header of the output file: + + docinfo:: Include `-docinfo.` + docinfo1:: Include `docinfo.` + docinfo2:: Include `docinfo.` and `-docinfo.` + + Where `` is the file name (sans extension) of the AsciiDoc + input file and `` is `.html` for HTML outputs or `.xml` for + DocBook outputs. If the input file is the standard input then the + output file name is used. + |=== EOS - doc = document_from_string input + doc = document_from_string input, sourcemap: true table = doc.blocks.first - assert !table.nil? + refute_nil table tbody = table.rows.body assert_equal 2, tbody.size + body_cell_1_2 = tbody[0][1] + assert_equal 5, body_cell_1_2.lineno body_cell_1_3 = tbody[0][2] - assert !body_cell_1_3.inner_document.nil? + refute_nil body_cell_1_3.inner_document assert body_cell_1_3.inner_document.nested? assert_equal doc, body_cell_1_3.inner_document.parent_document assert_equal doc.converter, body_cell_1_3.inner_document.converter - output = doc.render - - assert_css 'table > tbody > tr', output, 2 - assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(3) div.admonitionblock', output, 1 - assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(3) div.dlist', output, 1 + assert_equal 5, body_cell_1_3.lineno + assert_equal 6, body_cell_1_3.inner_document.lineno + note = (body_cell_1_3.inner_document.find_by context: :admonition)[0] + assert_equal 9, note.lineno + output = doc.convert standalone: false + + # NOTE JRuby matches the table inside the admonition block if the class is not specified on the table + assert_css 'table.tableblock > tbody > tr', output, 2 + assert_css 'table.tableblock > tbody > tr:nth-child(1) > td:nth-child(3) div.admonitionblock', output, 1 + assert_css 'table.tableblock > tbody > tr:nth-child(2) > td:nth-child(3) div.dlist', output, 1 + end + + test 'should preserve leading indentation in contents of AsciiDoc table cell if contents starts with newline' do + # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 + input = <<~EOS + |=== + a| + $ command + a| paragraph + |=== + EOS + doc = document_from_string input, sourcemap: true + table = doc.blocks[0] + tbody = table.rows.body + assert_equal 1, table.lineno + assert_equal 2, tbody[0][0].lineno + assert_equal 3, tbody[0][0].inner_document.lineno + assert_equal 4, tbody[1][0].lineno + output = doc.convert standalone: false + assert_css 'td', output, 2 + assert_xpath '(//td)[1]//*[@class="literalblock"]', output, 1 + assert_xpath '(//td)[2]//*[@class="paragraph"]', output, 1 + assert_xpath '(//pre)[1][text()="$ command"]', output, 1 + assert_xpath '(//p)[1][text()="paragraph"]', output, 1 end test 'preprocessor directive on first line of an AsciiDoc table cell should be processed' do - input = <<-EOS -|=== -a|include::fixtures/include-file.asciidoc[] -|=== + input = <<~'EOS' + |=== + a|include::fixtures/include-file.adoc[] + |=== EOS - output = render_embedded_string input, :safe => :safe, :base_dir => File.dirname(__FILE__) + output = convert_string_to_embedded input, safe: :safe, base_dir: testdir assert_match(/included content/, output) end test 'cross reference link in an AsciiDoc table cell should resolve to reference in main document' do - input = <<-EOS -== Some + input = <<~'EOS' + == Some -|=== -a|See <<_more>> -|=== + |=== + a|See <<_more>> + |=== -== More + == More -content + content EOS - result = render_string 
input + result = convert_string input assert_xpath '//a[@href="#_more"]', result, 1 assert_xpath '//a[@href="#_more"][text()="More"]', result, 1 end + test 'should discover anchor at start of cell and register it as a reference' do + input = <<~'EOS' + The highest peak in the Front Range is <>, which tops <> by just a few feet. + + [cols="1s,1"] + |=== + |[[mount-evans,Mount Evans]]Mount Evans + |14,271 feet + + h|[[grays-peak,Grays Peak]] + Grays Peak + |14,278 feet + |=== + EOS + doc = document_from_string input + refs = doc.catalog[:refs] + assert refs.key?('mount-evans') + assert refs.key?('grays-peak') + output = doc.convert standalone: false + assert_xpath '(//p)[1]/a[@href="#grays-peak"][text()="Grays Peak"]', output, 1 + assert_xpath '(//p)[1]/a[@href="#mount-evans"][text()="Mount Evans"]', output, 1 + assert_xpath '(//table/tbody/tr)[1]//td//a[@id="mount-evans"]', output, 1 + assert_xpath '(//table/tbody/tr)[2]//th//a[@id="grays-peak"]', output, 1 + end + test 'footnotes should not be shared between an AsciiDoc table cell and the main document' do - input = <<-EOS -|=== -a|AsciiDoc footnote:[A lightweight markup language.] -|=== + input = <<~'EOS' + |=== + a|AsciiDoc footnote:[A lightweight markup language.] + |=== EOS - result = render_string input - assert_css '#_footnote_1', result, 1 + result = convert_string input + assert_css '#_footnotedef_1', result, 1 end test 'callout numbers should be globally unique, including AsciiDoc table cells' do - input = <<-EOS -= Document Title + input = <<~'EOS' + = Document Title -== Section 1 + == Section 1 -|==== -a| -[source, yaml] ----- -key: value <1> ----- -<1> First callout -|==== - -== Section 2 - -|==== -a| -[source, yaml] ----- -key: value <1> ----- -<1> Second callout -|==== - -== Section 3 - -[source, yaml] ----- -key: value <1> ----- -<1> Third callout + |=== + a| + [source, yaml] + ---- + key: value <1> + ---- + <1> First callout + |=== + + == Section 2 + + |=== + a| + [source, yaml] + ---- + key: value <1> + ---- + <1> Second callout + |=== + + == Section 3 + + [source, yaml] + ---- + key: value <1> + ---- + <1> Third callout EOS - result = render_string input, :backend => 'docbook' + result = convert_string_to_embedded input, backend: 'docbook' conums = xmlnodes_at_xpath '//co', result assert_equal 3, conums.size ['CO1-1', 'CO2-1', 'CO3-1'].each_with_index do |conum, idx| @@ -991,87 +1437,168 @@ end end + test 'compat mode can be activated in AsciiDoc table cell' do + input = <<~'EOS' + |=== + a| + :compat-mode: + + The word 'italic' is emphasized. + |=== + EOS + + result = convert_string_to_embedded input + assert_xpath '//em[text()="italic"]', result, 1 + end + + test 'compat mode in AsciiDoc table cell inherits from parent document' do + input = <<~'EOS' + :compat-mode: + + The word 'italic' is emphasized. + + [cols=1*] + |=== + |The word 'oblique' is emphasized. + a| + The word 'slanted' is emphasized. + |=== + + The word 'askew' is emphasized. + EOS + + result = convert_string_to_embedded input + assert_xpath '//em[text()="italic"]', result, 1 + assert_xpath '//em[text()="oblique"]', result, 1 + assert_xpath '//em[text()="slanted"]', result, 1 + assert_xpath '//em[text()="askew"]', result, 1 + end + + test 'compat mode in AsciiDoc table cell can be unset if set in parent document' do + input = <<~'EOS' + :compat-mode: + + The word 'italic' is emphasized. + + [cols=1*] + |=== + |The word 'oblique' is emphasized. + a| + :!compat-mode: + + The word 'slanted' is not emphasized. + |=== + + The word 'askew' is emphasized. 
+ EOS + + result = convert_string_to_embedded input + assert_xpath '//em[text()="italic"]', result, 1 + assert_xpath '//em[text()="oblique"]', result, 1 + assert_xpath '//em[text()="slanted"]', result, 0 + assert_xpath '//em[text()="askew"]', result, 1 + end + test 'nested table' do - input = <<-EOS -[cols="1,2a"] -|=== -|Normal cell -|Cell with nested table -[cols="2,1"] -!=== -!Nested table cell 1 !Nested table cell 2 -!=== -|=== + input = <<~'EOS' + [cols="1,2a"] + |=== + |Normal cell + |Cell with nested table + [cols="2,1"] + !=== + !Nested table cell 1 !Nested table cell 2 + !=== + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 2 assert_css 'table table', output, 1 - assert_css 'table table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table > tbody > tr > td', output, 2 end + test 'can set format of nested table to psv' do + input = <<~'EOS' + [cols="2*"] + |=== + |normal cell + a| + [format=psv] + !=== + !nested cell + !=== + |=== + EOS + + output = convert_string_to_embedded input + assert_css 'table', output, 2 + assert_css 'table table', output, 1 + assert_css 'table > tbody > tr > td:nth-child(2) table', output, 1 + assert_css 'table > tbody > tr > td:nth-child(2) table > tbody > tr > td', output, 1 + end + test 'toc from parent document should not be included in an AsciiDoc table cell' do - input = <<-EOS -= Document Title -:toc: + input = <<~'EOS' + = Document Title + :toc: -== Section A + == Section A -|=== -a|AsciiDoc content -|=== + |=== + a|AsciiDoc content + |=== EOS - output = render_string input + output = convert_string input assert_css '.toc', output, 1 assert_css 'table .toc', output, 0 end test 'should be able to enable toc in an AsciiDoc table cell' do - input = <<-EOS -= Document Title + input = <<~'EOS' + = Document Title -== Section A + == Section A -|=== -a| -= Subdocument Title -:toc: + |=== + a| + = Subdocument Title + :toc: -== Subdocument Section A + == Subdocument Section A -content -|=== + content + |=== EOS - output = render_string input + output = convert_string input assert_css '.toc', output, 1 assert_css 'table .toc', output, 1 end test 'should be able to enable toc in both outer document and in an AsciiDoc table cell' do - input = <<-EOS -= Document Title -:toc: + input = <<~'EOS' + = Document Title + :toc: -== Section A + == Section A -|=== -a| -= Subdocument Title -:toc: macro + |=== + a| + = Subdocument Title + :toc: macro -[#table-cell-toc] -toc::[] + [#table-cell-toc] + toc::[] -== Subdocument Section A + == Subdocument Section A -content -|=== + content + |=== EOS - output = render_string input + output = convert_string input assert_css '.toc', output, 2 assert_css '#toc', output, 1 assert_css 'table .toc', output, 1 @@ -1079,16 +1606,16 @@ end test 'document in an AsciiDoc table cell should not see doctitle of parent' do - input = <<-EOS -= Document Title + input = <<~'EOS' + = Document Title -[cols="1a"] -|=== -|AsciiDoc content -|=== + [cols="1a"] + |=== + |AsciiDoc content + |=== EOS - output = render_string input + output = convert_string input assert_css 'table', output, 1 assert_css 'table > tbody > tr > td', output, 1 assert_css 'table > tbody > tr > td #preamble', output, 0 @@ -1096,41 +1623,149 @@ end test 'cell background color' do - input = <<-EOS -[cols="1e,1", options="header"] -|=== -|{set:cellbgcolor:green}green -|{set:cellbgcolor!} -plain -|{set:cellbgcolor:red}red 
-|{set:cellbgcolor!} -plain -|=== + input = <<~'EOS' + [cols="1e,1", options="header"] + |=== + |{set:cellbgcolor:green}green + |{set:cellbgcolor!} + plain + |{set:cellbgcolor:red}red + |{set:cellbgcolor!} + plain + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '(/table/thead/tr/th)[1][@style="background-color: green;"]', output, 1 assert_xpath '(/table/thead/tr/th)[2][@style="background-color: green;"]', output, 0 assert_xpath '(/table/tbody/tr/td)[1][@style="background-color: red;"]', output, 1 assert_xpath '(/table/tbody/tr/td)[2][@style="background-color: green;"]', output, 0 end + + test 'should warn if table block is not terminated' do + input = <<~'EOS' + outside + + |=== + | + inside + + still inside + + eof + EOS + + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '/table', output, 1 + assert_message logger, :WARN, ': line 3: unterminated table block', Hash + end + end + + test 'should show correct line number in warning about unterminated block inside AsciiDoc table cell' do + input = <<~'EOS' + outside + + * list item + + + |=== + |cell + a|inside + + ==== + unterminated example block + |=== + + eof + EOS + + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_xpath '//ul//table', output, 1 + assert_message logger, :WARN, ': line 9: unterminated example block', Hash + end + end + + test 'custom separator for an AsciiDoc table cell' do + input = <<~'EOS' + [cols=2,separator=!] + |=== + !Pipe output to vim + a! + ---- + asciidoctor -o - -s test.adoc | view - + ---- + |=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'table > tbody > tr', output, 1 + assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 + assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1) p', output, 1 + assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2) .listingblock', output, 1 + end + + test 'table with breakable option docbook 5' do + input = <<~'EOS' + .Table with breakable + [%breakable] + |=== + |Item |Quantity + |Item 1 |1 + |=== + EOS + output = convert_string_to_embedded input, backend: 'docbook5' + assert_includes output, '' + end + + test 'table with unbreakable option docbook 5' do + input = <<~'EOS' + .Table with unbreakable + [%unbreakable] + |=== + |Item |Quantity + |Item 1 |1 + |=== + EOS + output = convert_string_to_embedded input, backend: 'docbook5' + assert_includes output, '' + end + + test 'no implicit header row if cell in first line is quoted and spans multiple lines' do + input = <<~'EOS' + [cols=2*l] + ,=== + "A1 + + A1 continued",B1 + A2,B2 + ,=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'table > thead', output, 0 + assert_css 'table > tbody', output, 1 + assert_css 'table > tbody > tr', output, 2 + assert_xpath %((//td)[1]//pre[text()="A1\n\nA1 continued"]), output, 1 + end end context 'DSV' do - - test 'renders simple dsv table' do - input = <<-EOS -[width="75%",format="dsv"] -|=== -root:x:0:0:root:/root:/bin/bash -bin:x:1:1:bin:/bin:/sbin/nologin -mysql:x:27:27:MySQL\\:Server:/var/lib/mysql:/bin/bash -gdm:x:42:42::/var/lib/gdm:/sbin/nologin -sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin -nobody:x:99:99:Nobody:/:/sbin/nologin -|=== + test 'converts simple dsv table' do + input = <<~'EOS' + 
[width="75%",format="dsv"] + |=== + root:x:0:0:root:/root:/bin/bash + bin:x:1:1:bin:/bin:/sbin/nologin + mysql:x:27:27:MySQL\:Server:/var/lib/mysql:/bin/bash + gdm:x:42:42::/var/lib/gdm:/sbin/nologin + sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin + nobody:x:99:99:Nobody:/:/sbin/nologin + |=== EOS - doc = document_from_string input, :header_footer => false + doc = document_from_string input, standalone: false table = doc.blocks[0] assert 100, table.columns.map {|col| col.attributes['colpcwidth'] }.reduce(:+) output = doc.convert @@ -1143,13 +1778,13 @@ end test 'dsv format shorthand' do - input = <<-EOS -:=== -a:b:c -1:2:3 -:=== + input = <<~'EOS' + :=== + a:b:c + 1:2:3 + :=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 @@ -1158,25 +1793,25 @@ end test 'single cell in DSV table should only produce single row' do - input = <<-EOS -:=== -single cell -:=== + input = <<~'EOS' + :=== + single cell + :=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table td', output, 1 end test 'should treat trailing colon as an empty cell' do - input = <<-EOS -:==== -A1: -B1:B2 -C1:C2 -:==== + input = <<~'EOS' + :=== + A1: + B1:B2 + C1:C2 + :=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 @@ -1188,16 +1823,15 @@ end context 'CSV' do - test 'should treat trailing comma as an empty cell' do - input = <<-EOS -,==== -A1, -B1,B2 -C1,C2 -,==== + input = <<~'EOS' + ,=== + A1, + B1,B2 + C1,C2 + ,=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 @@ -1207,36 +1841,106 @@ assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1 end - test 'mixed unquoted records and quoted records with escaped quotes, commas and wrapped lines' do - input = <<-EOS -[format="csv",options="header"] -|=== -Year,Make,Model,Description,Price -1997,Ford,E350,"ac, abs, moon",3000.00 -1999,Chevy,"Venture ""Extended Edition""","",4900.00 -1999,Chevy,"Venture ""Extended Edition, Very Large""",,5000.00 -1996,Jeep,Grand Cherokee,"MUST SELL! 
-air, moon roof, loaded",4799.00 -|=== + test 'should log error but not crash if cell data has unclosed quote' do + input = <<~'EOS' + ,=== + a,b + c," + ,=== + EOS + using_memory_logger do |logger| + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table td', output, 4 + assert_xpath '(/table/td)[4]/p', output, 0 + assert_message logger, :ERROR, '<stdin>: line 3: unclosed quote in CSV data; setting cell to empty', Hash + end + end + + test 'should preserve newlines in quoted CSV values' do + input = <<~'EOS' + [cols="1,1,1l"] + ,=== + "A + B + C","one + + two + + three","do + + re + + me" + ,=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 3 + assert_css 'table > tbody > tr', output, 1 + assert_xpath '/table/tbody/tr[1]/td', output, 3 + assert_xpath %(/table/tbody/tr[1]/td[1]/p[text()="A\nB\nC"]), output, 1 + assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 3 + assert_xpath '/table/tbody/tr[1]/td[2]/p[1][text()="one"]', output, 1 + assert_xpath '/table/tbody/tr[1]/td[2]/p[2][text()="two"]', output, 1 + assert_xpath '/table/tbody/tr[1]/td[2]/p[3][text()="three"]', output, 1 + assert_xpath %(/table/tbody/tr[1]/td[3]//pre[text()="do\n\nre\n\nme"]), output, 1 + end + + test 'mixed unquoted records and quoted records with escaped quotes, commas, and wrapped lines' do + input = <<~'EOS' + [format="csv",options="header"] + |=== + Year,Make,Model,Description,Price + 1997,Ford,E350,"ac, abs, moon",3000.00 + 1999,Chevy,"Venture ""Extended Edition""","",4900.00 + 1999,Chevy,"Venture ""Extended Edition, Very Large""",,5000.00 + 1996,Jeep,Grand Cherokee,"MUST SELL! + air, moon roof, loaded",4799.00 + 2000,Toyota,Tundra,"""This one's gonna to blow you're socks off,"" per the sticker",10000.00 + 2000,Toyota,Tundra,"Check it, ""this one's gonna to blow you're socks off"", per the sticker",10000.00 + |=== + EOS + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 20%"]', output, 5 assert_css 'table > thead > tr', output, 1 - assert_css 'table > tbody > tr', output, 4 + assert_css 'table > tbody > tr', output, 6 assert_xpath '((//tbody/tr)[1]/td)[4]/p[text()="ac, abs, moon"]', output, 1 assert_xpath %(((//tbody/tr)[2]/td)[3]/p[text()='Venture "Extended Edition"']), output, 1 - assert_xpath '((//tbody/tr)[4]/td)[4]/p[text()="MUST SELL!
air, moon roof, loaded"]', output, 1 + assert_xpath %(((//tbody/tr)[4]/td)[4]/p[text()="MUST SELL!\nair, moon roof, loaded"]), output, 1 + assert_xpath %(((//tbody/tr)[5]/td)[4]/p[text()='"This one#{decode_char 8217}s gonna to blow you#{decode_char 8217}re socks off," per the sticker']), output, 1 + assert_xpath %(((//tbody/tr)[6]/td)[4]/p[text()='Check it, "this one#{decode_char 8217}s gonna to blow you#{decode_char 8217}re socks off", per the sticker']), output, 1 + end + + test 'should allow quotes around a CSV value to be on their own lines' do + input = <<~'EOS' + [cols=2*] + ,=== + " + A + "," + B + " + ,=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 2 + assert_css 'table > tbody > tr', output, 1 + assert_xpath '/table/tbody/tr[1]/td', output, 2 + assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A"]', output, 1 + assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="B"]', output, 1 end test 'csv format shorthand' do - input = <<-EOS -,=== -a,b,c -1,2,3 -,=== + input = <<~'EOS' + ,=== + a,b,c + 1,2,3 + ,=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 @@ -1244,15 +1948,15 @@ assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end - test 'custom separator' do - input = <<-EOS -[format="csv", separator=";"] -|=== -a;b;c -1;2;3 -|=== + test 'tsv as format' do + input = <<~EOS + [format=tsv] + ,=== + a\tb\tc + 1\t2\t3 + ,=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 @@ -1260,87 +1964,77 @@ assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end - test 'custom separator for an AsciiDoc table cell' do - input = <<-EOS -[cols=2,separator=!] -|=== -!Pipe output to vim -a! 
----- -asciidoctor -o - -s test.adoc | view - ----- -|=== + test 'custom csv separator' do + input = <<~'EOS' + [format=csv,separator=;] + |=== + a;b;c + 1;2;3 + |=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table', output, 1 - assert_css 'table > colgroup > col', output, 2 - assert_css 'table > tbody > tr', output, 1 - assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 - assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1) p', output, 1 - assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2) .listingblock', output, 1 + assert_css 'table > colgroup > col', output, 3 + assert_css 'table > tbody > tr', output, 2 + assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 + assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 + end + + test 'tab as separator' do + input = <<~EOS + [separator=\\t] + ,=== + a\tb\tc + 1\t2\t3 + ,=== + EOS + output = convert_string_to_embedded input + assert_css 'table', output, 1 + assert_css 'table > colgroup > col', output, 3 + assert_css 'table > tbody > tr', output, 2 + assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 + assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'single cell in CSV table should only produce single row' do - input = <<-EOS -,=== -single cell -,=== + input = <<~'EOS' + ,=== + single cell + ,=== EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_css 'table td', output, 1 end - test 'table with breakable db45' do - input = <<-EOS -.Table with breakable -[options="breakable"] -|=== -|Item |Quantity -|Item 1 |1 -|=== - EOS - output = render_embedded_string input, :backend => 'docbook45' - assert output.include?('<?dbfo keep-together="auto"?>') - end - - test 'table with breakable db5' do - input = <<-EOS -.Table with breakable -[options="breakable"] -|=== -|Item |Quantity -|Item 1 |1 -|=== - EOS - output = render_embedded_string input, :backend => 'docbook5' - assert output.include?('<?dbfo keep-together="auto"?>') - end - - test 'table with unbreakable db5' do - input = <<-EOS -.Table with unbreakable -[options="unbreakable"] -|=== -|Item |Quantity -|Item 1 |1 -|=== - EOS - output = render_embedded_string input, :backend => 'docbook5' - assert output.include?('<?dbfo keep-together="always"?>') - end - - test 'table with unbreakable db45' do - input = <<-EOS -.Table with unbreakable -[options="unbreakable"] -|=== -|Item |Quantity -|Item 1 |1 -|=== + test 'cell formatted with AsciiDoc style' do + input = <<~'EOS' + [cols="1,1,1a",separator=;] + ,=== + element;description;example + + thematic break,a visible break; also known as a horizontal rule;--- + ,=== EOS - output = render_embedded_string input, :backend => 'docbook45' - assert output.include?('<?dbfo keep-together="always"?>') + + output = convert_string_to_embedded input + assert_css 'table tbody hr', output, 1 + end + + test 'should strip whitespace around contents of AsciiDoc cell' do + input = <<~'EOS' + [cols="1,1,1a",separator=;] + ,=== + element;description;example + + paragraph;contiguous lines of words and phrases;" + one sentence, one line + " + ,=== + EOS + + output = convert_string_to_embedded input + assert_xpath '/table/tbody//*[@class="paragraph"]/p[text()="one sentence, one line"]', output, 1 end end end diff -Nru asciidoctor-1.5.5/test/test_helper.rb asciidoctor-2.0.10/test/test_helper.rb --- asciidoctor-1.5.5/test/test_helper.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/test_helper.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,20 +1,25 @@ -# encoding: UTF-8 -ASCIIDOCTOR_PROJECT_DIR =
File.dirname File.dirname(__FILE__) -Dir.chdir ASCIIDOCTOR_PROJECT_DIR - -if RUBY_VERSION < '1.9' - require 'rubygems' -end +# frozen_string_literal: true +ASCIIDOCTOR_TEST_DIR = File.absolute_path __dir__ +ASCIIDOCTOR_LIB_DIR = ENV['ASCIIDOCTOR_LIB_DIR'] || (File.join ASCIIDOCTOR_TEST_DIR, '../lib') require 'simplecov' if ENV['COVERAGE'] == 'true' -require File.join(ASCIIDOCTOR_PROJECT_DIR, 'lib', 'asciidoctor') +require File.join ASCIIDOCTOR_LIB_DIR, 'asciidoctor' +Dir.chdir Asciidoctor::ROOT_DIR -require 'socket' require 'nokogiri' +proc do + old_verbose, $VERBOSE = $VERBOSE, nil + require 'rouge' + Rouge::Lexer.disable_debug! + $VERBOSE = old_verbose +end.call +require 'socket' +require 'tempfile' require 'tmpdir' autoload :FileUtils, 'fileutils' +autoload :Open3, 'open3' autoload :Pathname, 'pathname' RE_XMLNS_ATTRIBUTE = / xmlns="[^"]+"/ @@ -26,99 +31,103 @@ Minitest::Test = MiniTest::Unit::TestCase unless defined? Minitest::Test class Minitest::Test + def jruby? + RUBY_ENGINE == 'jruby' + end + def windows? RbConfig::CONFIG['host_os'] =~ /win|ming/ end def disk_root - "#{windows? ? File.expand_path(__FILE__).split('/').first : nil}/" + %(#{windows? ? (Asciidoctor::ROOT_DIR.partition '/')[0] : ''}/) end def empty_document options = {} - if options[:parse] - (Asciidoctor::Document.new [], options).parse - else - Asciidoctor::Document.new [], options - end + options[:parse] ? (Asciidoctor::Document.new [], options).parse : (Asciidoctor::Document.new [], options) end def empty_safe_document options = {} - options[:safe] = :safe - Asciidoctor::Document.new [], options + Asciidoctor::Document.new [], (options.merge safe: :safe) end - def sample_doc_path(name) - name = name.to_s - unless name.include?('.') - ['asciidoc', 'txt'].each do |ext| - if File.exist?(fixture_path("#{name}.#{ext}")) - name = "#{name}.#{ext}" + def sample_doc_path name + unless (name = name.to_s).include? '.' + %w(adoc asciidoc txt).each do |ext| + if File.exist? fixture_path %(#{name}.#{ext}) + name = %(#{name}.#{ext}) break end end end - fixture_path(name) + fixture_path name end - def fixture_path(name) - File.join(File.expand_path(File.dirname(__FILE__)), 'fixtures', name) + def bindir + File.join Asciidoctor::ROOT_DIR, 'bin' end - def example_document(name, opts = {}) - document_from_string File.read(sample_doc_path(name)), opts + def asciidoctor_cmd use_ruby = true, ruby_opts = nil + executable = File.join bindir, 'asciidoctor' + if use_ruby + ruby = File.join RbConfig::CONFIG['bindir'], RbConfig::CONFIG['ruby_install_name'] + ruby = %(#{ruby} #{ruby_opts}) if ruby_opts + %(#{ruby} #{executable}) + else + executable + end end - def assert_difference(expression, difference = 1, message = nil, &block) - expressions = [expression] + def testdir + ASCIIDOCTOR_TEST_DIR + end - exps = expressions.map { |e| - e.respond_to?(:call) ? 
e : lambda { eval(e, block.binding) } - } - before = exps.map { |e| e.call } + def fixturedir + File.join testdir, 'fixtures' + end - yield + def fixture_path name + File.join fixturedir, name + end - expressions.zip(exps).each_with_index do |(code, e), i| - error = "#{code.inspect} didn't change by #{difference}" - error = "#{message}.\n#{error}" if message - assert_equal(before[i] + difference, e.call, error) - end + def example_document name, opts = {} + document_from_string (File.read (sample_doc_path name), mode: Asciidoctor::FILE_READ_MODE), opts end - def xmlnodes_at_css(css, content, count = nil) - xmlnodes_at_path(:css, css, content) + def xmlnodes_at_css css, content, count = nil + xmlnodes_at_path :css, css, content, count end - def xmlnodes_at_xpath(xpath, content, count = nil) - xmlnodes_at_path(:xpath, xpath, content) + def xmlnodes_at_xpath xpath, content, count = nil + xmlnodes_at_path :xpath, xpath, content, count end - def xmlnodes_at_path(type, path, content, count = nil) + def xmlnodes_at_path type, path, content, count = nil doc = xmldoc_from_string content case type - when :xpath - namespaces = doc.respond_to?(:root) ? doc.root.namespaces : {} - results = doc.xpath("#{path.sub('/', './')}", namespaces) - when :css - results = doc.css(path) + when :xpath + namespaces = (doc.respond_to? :root) ? doc.root.namespaces : {} + results = doc.xpath %(#{path.sub '/', './'}), namespaces + when :css + results = doc.css path end count == 1 ? results.first : results end # Generate an xpath attribute matcher that matches a name in the class attribute - def contains_class(name) + def contains_class name %(contains(concat(' ', normalize-space(@class), ' '), ' #{name} ')) end - def assert_css(css, content, count = nil) - assert_path(:css, css, content, count) + def assert_css css, content, count = nil + assert_path :css, css, content, count end - def assert_xpath(xpath, content, count = nil) - assert_path(:xpath, xpath, content, count) + def assert_xpath xpath, content, count = nil + assert_path :xpath, xpath, content, count end - def assert_path(type, path, content, count = nil) + def assert_path type, path, content, count = nil case type when :xpath type_name = 'XPath' @@ -128,182 +137,261 @@ results = xmlnodes_at_path type, path, content - if (count == true || count == false) - if (count != results) - flunk "#{type_name} #{path} yielded #{results} rather than #{count} for:\n#{content}" - else + if count == true || count == false + if count == results assert true + else + flunk %(#{type_name} #{path} yielded #{results} rather than #{count} for:\n#{content}) end - elsif (count && results.length != count) - flunk "#{type_name} #{path} yielded #{results.length} elements rather than #{count} for:\n#{content}" - elsif (count.nil? && results.empty?) - flunk "#{type_name} #{path} not found in:\n#{content}" + elsif count && results.size != count + flunk %(#{type_name} #{path} yielded #{results.size} elements rather than #{count} for:\n#{content}) + elsif count.nil? && results.empty? + flunk %(#{type_name} #{path} not found in:\n#{content}) else assert true end end - def xmldoc_from_string(content) - if content.match(RE_XMLNS_ATTRIBUTE) - doc = Nokogiri::XML::Document.parse(content) - elsif !(doctype_match = content.match(RE_DOCTYPE)) - doc = Nokogiri::HTML::DocumentFragment.parse(content) - elsif doctype_match[1].start_with? 
'html' - doc = Nokogiri::HTML::Document.parse(content) + def assert_include expected, actual + assert_includes actual, expected + end + + def refute_include not_expected, actual + refute_includes actual, not_expected + end + + def assert_message logger, severity, expected_message, kind = String, idx = nil + unless idx + assert_equal 1, logger.messages.size + idx = 0 + end + message = logger.messages[idx] + assert_equal severity, message[:severity] + assert_kind_of kind, message[:message] + if kind == String + actual_message = message[:message] + else + refute_nil message[:message][:source_location] + actual_message = message[:message].inspect + end + if expected_message.start_with? '~' + assert_includes actual_message, expected_message[1..-1] else - doc = Nokogiri::XML::Document.parse(content) + assert_equal expected_message, actual_message end end - def document_from_string(src, opts = {}) - assign_default_test_options opts - if opts[:parse] - (Asciidoctor::Document.new src.lines.entries, opts).parse + def assert_messages logger, expected_messages + assert_equal expected_messages.size, logger.messages.size + expected_messages.each_with_index do |expected_message_details, idx| + severity, expected_message, kind = expected_message_details + assert_message logger, severity, expected_message, (kind || String), idx + end + end + + def xmldoc_from_string content + if (content.start_with? ' "<" # - # Returns the String entity expanded to its equivalent UTF-8 glyph - def expand_entity(number) - [number].pack('U*') + # Returns the decoded String that corresponds to the numeric character reference + def decode_char number + [number].pack 'U1' end - alias :entity :expand_entity - def invoke_cli_with_filenames(argv = [], filenames = [], &block) + def invoke_cli_with_filenames argv = [], filenames = [], &block + filepaths = [] - filepaths = Array.new - - filenames.each { |filename| - if filenames.nil?|| ::Pathname.new(filename).absolute? - filepaths.push(filename) + filenames.each do |filename| + if filenames.nil? || (Pathname.new filename).absolute? + filepaths << filename else - filepaths.push(File.join(File.dirname(__FILE__), 'fixtures', filename)) + filepaths << (fixture_path filename) end - } - - invoker = Asciidoctor::Cli::Invoker.new(argv + filepaths) + end + invoker = Asciidoctor::Cli::Invoker.new argv + filepaths invoker.invoke!(&block) invoker end - def invoke_cli_to_buffer(argv = [], filename = 'sample.asciidoc', &block) - invoke_cli(argv, filename, [StringIO.new, StringIO.new], &block) + def invoke_cli_to_buffer argv = [], filename = 'sample.adoc', &block + invoke_cli argv, filename, [StringIO.new, StringIO.new], &block end - def invoke_cli(argv = [], filename = 'sample.asciidoc', buffers = nil, &block) - if filename.nil? || filename == '-' || ::Pathname.new(filename).absolute? + def invoke_cli argv = [], filename = 'sample.adoc', buffers = nil, &block + if filename.nil? || filename == '-' || (Pathname.new filename).absolute? 
filepath = filename else - filepath = File.join(File.dirname(__FILE__), 'fixtures', filename) - end - invoker = Asciidoctor::Cli::Invoker.new(argv + [filepath]) - if buffers - invoker.redirect_streams(*buffers) + filepath = fixture_path filename end + invoker = Asciidoctor::Cli::Invoker.new argv + [filepath] + invoker.redirect_streams(*buffers) if buffers invoker.invoke!(&block) invoker end def redirect_streams - old_stdout, $stdout = $stdout, (tmp_stdout = ::StringIO.new) - old_stderr, $stderr = $stderr, (tmp_stderr = ::StringIO.new) + old_stdout, $stdout = $stdout, StringIO.new + old_stderr, $stderr = $stderr, StringIO.new + old_logger = Asciidoctor::LoggerManager.logger + old_logger_level = old_logger.level + new_logger = (Asciidoctor::LoggerManager.logger = Asciidoctor::Logger.new $stderr) + new_logger.level = old_logger_level + yield $stdout, $stderr + ensure + $stdout, $stderr = old_stdout, old_stderr + Asciidoctor::LoggerManager.logger = old_logger + end + + def resolve_localhost + Socket.ip_address_list.find(&:ipv4?).ip_address + end + + def using_memory_logger level = nil + old_logger = Asciidoctor::LoggerManager.logger + memory_logger = Asciidoctor::MemoryLogger.new + memory_logger.level = level if level begin - yield tmp_stdout, tmp_stderr + Asciidoctor::LoggerManager.logger = memory_logger + yield memory_logger ensure - $stdout = old_stdout - $stderr = old_stderr + Asciidoctor::LoggerManager.logger = old_logger end end - def resolve_localhost - (RUBY_VERSION < '1.9' || RUBY_ENGINE == 'rbx') ? Socket.gethostname : - Socket.ip_address_list.find {|addr| addr.ipv4? }.ip_address + def in_verbose_mode + begin + old_logger_level, Asciidoctor::LoggerManager.logger.level = Asciidoctor::LoggerManager.logger.level, Logger::Severity::DEBUG + yield + ensure + Asciidoctor::LoggerManager.logger.level = old_logger_level + end + end + + def run_command *args, &block + if Hash === (env = args[0]) + cmd = args[1] + else + cmd, env = env, nil + end + opts = { err: [:child, :out] } + if env + # NOTE remove workaround once https://github.com/jruby/jruby/issues/3428 is resolved + if jruby? + begin + old_env, env = ENV, (ENV.merge env) + env.each {|key, val| env.delete key if val.nil? } if env.value? nil + ENV.replace env + IO.popen cmd, opts, &block + ensure + ENV.replace old_env + end + elsif env.value? nil + env = env.inject(ENV.to_h) do |acc, (key, val)| + val.nil? ? (acc.delete key) : (acc[key] = val) + acc + end + IO.popen env, cmd, (opts.merge unsetenv_others: true), &block + else + IO.popen env, cmd, opts, &block + end + else + IO.popen cmd, opts, &block + end end def using_test_webserver host = resolve_localhost, port = 9876 + base_dir = testdir server = TCPServer.new host, port - base_dir = File.expand_path File.dirname __FILE__ - t = Thread.new do + server_thread = Thread.start do while (session = server.accept) request = session.gets - resource = nil - if (m = /GET (\S+) HTTP\/1\.1$/.match(request.chomp)) - resource = (resource = m[1]) == '' ? '.' : resource + if /^GET (\S+) HTTP\/1\.1$/ =~ request.chomp + resource = (resource = $1) == '' ? '.' 
: resource else session.print %(HTTP/1.1 405 Method Not Allowed\r\nContent-Type: text/plain\r\n\r\n) session.print %(405 - Method not allowed\n) session.close - break + next end - if resource == '/name/asciidoctor' session.print %(HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n) session.print %({"name": "asciidoctor"}\n) elsif File.file?(resource_file = (File.join base_dir, resource)) - mimetype = if (ext = ::File.extname(resource_file)[1..-1]) + mimetype = if (ext = File.extname(resource_file)[1..-1]) ext == 'adoc' ? 'text/plain' : %(image/#{ext}) else 'text/plain' end session.print %(HTTP/1.1 200 OK\r\nContent-Type: #{mimetype}\r\n\r\n) - File.open resource_file, 'rb' do |fd| + File.open resource_file, Asciidoctor::FILE_READ_MODE do |fd| until fd.eof? do buffer = fd.read 256 session.write buffer @@ -319,13 +407,9 @@ begin yield ensure - begin - server.shutdown - # "Errno::ENOTCONN: Socket is not connected' is reported on some platforms; call #close instead of #shutdown - rescue Errno::ENOTCONN - server.close - end - t.exit + server_thread.exit + server_thread.value + server.close end end end @@ -343,57 +427,57 @@ # block syntax. Adding setup or teardown instance methods defeats the purpose # of this library. class Minitest::Test - def self.setup(&block) - define_method :setup do - super(&block) - instance_eval(&block) + class << self + def setup &block + define_method :setup do + super(&block) + instance_eval(&block) + end end - end - def self.teardown(&block) - define_method :teardown do - instance_eval(&block) - super(&block) + def teardown &block + define_method :teardown do + instance_eval(&block) + super(&block) + end end - end - def self.context(*name, &block) - subclass = Class.new(self) - remove_tests(subclass) - subclass.class_eval(&block) if block_given? - const_set(context_name(name.join(" ")), subclass) - end + def context *name, &block + subclass = Class.new self + remove_tests subclass + subclass.class_eval(&block) if block_given? + const_set (context_name name.join(' ')), subclass + end - def self.test(name, &block) - define_method(test_name(name), &block) - end + def test name, &block + define_method (test_name name), &block + end - class << self - alias_method :should, :test - alias_method :describe, :context - end + def remove_tests subclass + subclass.public_instance_methods.each do |m| + subclass.send :undef_method, m if m.to_s.start_with? 
'test_' + end + end -private + alias should test + alias describe context - def self.context_name(name) - "Test#{sanitize_name(name).gsub(/(^| )(\w)/) { $2.upcase }}".to_sym - end + private - def self.test_name(name) - "test_#{sanitize_name(name).gsub(/\s+/,'_')}".to_sym - end + def context_name name + %(Test#{(sanitize_name name).gsub(/(^| )(\w)/) { $2.upcase }}).to_sym + end - def self.sanitize_name(name) - name.gsub(/\W+/, ' ').strip - end + def test_name name + %(test_#{((sanitize_name name).gsub %r/\s+/, '_')}).to_sym + end - def self.remove_tests(subclass) - subclass.public_instance_methods.grep(/^test_/).each do |meth| - subclass.send(:undef_method, meth.to_sym) + def sanitize_name name + (name.gsub %r/\W+/, ' ').strip end end end -def context(*name, &block) - Minitest::Test.context(name, &block) +def context *name, &block + Minitest::Test.context name, &block end diff -Nru asciidoctor-1.5.5/test/text_test.rb asciidoctor-2.0.10/test/text_test.rb --- asciidoctor-1.5.5/test/text_test.rb 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/test/text_test.rb 2019-08-18 16:11:54.000000000 +0000 @@ -1,91 +1,86 @@ -# encoding: UTF-8 -unless defined? ASCIIDOCTOR_PROJECT_DIR - $: << File.dirname(__FILE__); $:.uniq! - require 'test_helper' -end +# frozen_string_literal: true +require_relative 'test_helper' context "Text" do test "proper encoding to handle utf8 characters in document using html backend" do - output = example_document(:encoding).render + output = example_document(:encoding).convert assert_xpath '//p', output, 4 assert_xpath '//a', output, 1 end test "proper encoding to handle utf8 characters in embedded document using html backend" do - output = example_document(:encoding, :header_footer => false).render + output = example_document(:encoding, standalone: false).convert assert_xpath '//p', output, 4 assert_xpath '//a', output, 1 end - test "proper encoding to handle utf8 characters in document using docbook45 backend" do - output = example_document(:encoding, :attributes => {'backend' => 'docbook45', 'xmlns' => ''}).render + test 'proper encoding to handle utf8 characters in document using docbook backend' do + output = example_document(:encoding, attributes: { 'backend' => 'docbook', 'xmlns' => '' }).convert assert_xpath '//xmlns:simpara', output, 4 - assert_xpath '//xmlns:ulink', output, 1 + assert_xpath '//xmlns:link', output, 1 end - test "proper encoding to handle utf8 characters in embedded document using docbook45 backend" do - output = example_document(:encoding, :header_footer => false, :attributes => {'backend' => 'docbook45'}).render + test 'proper encoding to handle utf8 characters in embedded document using docbook backend' do + output = example_document(:encoding, standalone: false, attributes: { 'backend' => 'docbook' }).convert assert_xpath '//simpara', output, 4 - assert_xpath '//ulink', output, 1 + assert_xpath '//link', output, 1 end # NOTE this test ensures we have the encoding line on block templates too test 'proper encoding to handle utf8 characters in arbitrary block' do input = [] input << "[verse]\n" - input.concat(File.readlines(sample_doc_path(:encoding))) + input += (File.readlines (sample_doc_path :encoding), mode: Asciidoctor::FILE_READ_MODE) doc = empty_document - reader = Asciidoctor::PreprocessorReader.new doc, input + reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true block = Asciidoctor::Parser.next_block(reader, doc) - assert_xpath '//pre', block.render.gsub(/^\s*\n/, ''), 1 + assert_xpath '//pre', 
block.convert.gsub(/^\s*\n/, ''), 1 end test 'proper encoding to handle utf8 characters from included file' do - input = <<-EOS -include::fixtures/encoding.asciidoc[tags=romé] - EOS - doc = empty_safe_document :base_dir => File.expand_path(File.dirname(__FILE__)) - reader = Asciidoctor::PreprocessorReader.new doc, input + input = 'include::fixtures/encoding.adoc[tags=romé]' + doc = empty_safe_document base_dir: testdir + reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true block = Asciidoctor::Parser.next_block(reader, doc) - output = block.render + output = block.convert assert_css '.paragraph', output, 1 end test 'escaped text markup' do assert_match(/All your &lt;em&gt;inline&lt;\/em&gt; markup belongs to &lt;strong&gt;us&lt;\/strong&gt;!/, - render_string('All your <em>inline</em> markup belongs to <strong>us</strong>!')) + convert_string('All your <em>inline</em> markup belongs to <strong>us</strong>!')) end test "line breaks" do - assert_xpath "//br", render_string("Well this is +\njust fine and dandy, isn't it?"), 1 + assert_xpath "//br", convert_string("Well this is +\njust fine and dandy, isn't it?"), 1 end test 'single- and double-quoted text' do - rendered = render_embedded_string(%q(``Where?,'' she said, flipping through her copy of `The New Yorker.'), :attributes => {'compat-mode' => ''}) - assert_match(/“Where\?,”/, rendered) - assert_match(/‘The New Yorker.’/, rendered) - - rendered = render_embedded_string(%q("`Where?,`" she said, flipping through her copy of '`The New Yorker.`')) - assert_match(/“Where\?,”/, rendered) - assert_match(/‘The New Yorker.’/, rendered) + output = convert_string_to_embedded(%q(``Where?,'' she said, flipping through her copy of `The New Yorker.'), attributes: { 'compat-mode' => '' }) + assert_match(/“Where\?,”/, output) + assert_match(/‘The New Yorker.’/, output) + + output = convert_string_to_embedded(%q("`Where?,`" she said, flipping through her copy of '`The New Yorker.`')) + assert_match(/“Where\?,”/, output) + assert_match(/‘The New Yorker.’/, output) end test 'multiple double-quoted text on a single line' do assert_equal '“Our business is constantly changing” or “We need faster time to market.”', - render_embedded_string(%q(``Our business is constantly changing'' or ``We need faster time to market.''), :doctype => :inline, :attributes => {'compat-mode' => ''}) + convert_inline_string(%q(``Our business is constantly changing'' or ``We need faster time to market.''), attributes: { 'compat-mode' => '' }) assert_equal '“Our business is constantly changing” or “We need faster time to market.”', - render_embedded_string(%q("`Our business is constantly changing`" or "`We need faster time to market.`"), :doctype => :inline) + convert_inline_string(%q("`Our business is constantly changing`" or "`We need faster time to market.`")) end test 'horizontal rule' do - input = <<-EOS -This line is separated by a horizontal rule... + input = <<~'EOS' + This line is separated by a horizontal rule... -''' + ''' -...from this line. + ...from this line.
EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath "//hr", output, 1 assert_xpath "/*[@class='paragraph']", output, 2 assert_xpath "(/*[@class='paragraph'])[1]/following-sibling::hr", output, 1 @@ -144,14 +139,14 @@ bad_variants.each do |variant| good_offsets.each do |offset| - input = <<-EOS -This line is separated something that is not a horizontal rule... + input = <<~EOS + This line is separated by something that is not a horizontal rule... -#{offset}#{variant} + #{offset}#{variant} -...from this line. + ...from this line. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//hr', output, 0 end end @@ -169,139 +164,141 @@ good_variants.each do |variant| bad_offsets.each do |offset| - input = <<-EOS -This line is separated something that is not a horizontal rule... + input = <<~EOS + This line is separated by something that is not a horizontal rule... -#{offset}#{variant} + #{offset}#{variant} -...from this line. + ...from this line. EOS - output = render_embedded_string input + output = convert_string_to_embedded input assert_xpath '//hr', output, 0 end end end test "emphasized text using underscore characters" do - assert_xpath "//em", render_string("An _emphatic_ no") + assert_xpath "//em", convert_string("An _emphatic_ no") end test 'emphasized text with single quote using apostrophe characters' do - rsquo = [8217].pack 'U*' - assert_xpath %(//em[text()="Johnny#{rsquo}s"]), render_string(%q(It's 'Johnny's' phone), :attributes => {'compat-mode' => ''}) - assert_xpath %(//p[text()="It#{rsquo}s 'Johnny#{rsquo}s' phone"]), render_string(%q(It's 'Johnny's' phone)) + rsquo = decode_char 8217 + assert_xpath %(//em[text()="Johnny#{rsquo}s"]), convert_string(%q(It's 'Johnny's' phone), attributes: { 'compat-mode' => '' }) + assert_xpath %(//p[text()="It#{rsquo}s 'Johnny#{rsquo}s' phone"]), convert_string(%q(It's 'Johnny's' phone)) end test 'emphasized text with escaped single quote using apostrophe characters' do - assert_xpath %(//em[text()="Johnny's"]), render_string(%q(It's 'Johnny\\'s' phone), :attributes => {'compat-mode' => ''}) - assert_xpath %(//p[text()="It's 'Johnny's' phone"]), render_string(%q(It\\'s 'Johnny\\'s' phone)) + assert_xpath %(//em[text()="Johnny's"]), convert_string(%q(It's 'Johnny\\'s' phone), attributes: { 'compat-mode' => '' }) + assert_xpath %(//p[text()="It's 'Johnny's' phone"]), convert_string(%q(It\\'s 'Johnny\\'s' phone)) end test "escaped single quote is restored as single quote" do - assert_xpath "//p[contains(text(), \"Let's do it!\")]", render_string("Let\\'s do it!") + assert_xpath "//p[contains(text(), \"Let's do it!\")]", convert_string("Let\\'s do it!") end test 'unescape escaped single quote emphasis in compat mode only' do - assert_xpath %(//p[text()="A 'single quoted string' example"]), render_embedded_string(%(A \\'single quoted string' example), :attributes => {'compat-mode' => ''}) - assert_xpath %(//p[text()="'single quoted string'"]), render_embedded_string(%(\\'single quoted string'), :attributes => {'compat-mode' => ''}) + assert_xpath %(//p[text()="A 'single quoted string' example"]), convert_string_to_embedded(%(A \\'single quoted string' example), attributes: { 'compat-mode' => '' }) + assert_xpath %(//p[text()="'single quoted string'"]), convert_string_to_embedded(%(\\'single quoted string'), attributes: { 'compat-mode' => '' }) - assert_xpath %(//p[text()="A \\'single quoted string' example"]), render_embedded_string(%(A \\'single quoted 
string' example)) - assert_xpath %(//p[text()="\\'single quoted string'"]), render_embedded_string(%(\\'single quoted string')) + assert_xpath %(//p[text()="A \\'single quoted string' example"]), convert_string_to_embedded(%(A \\'single quoted string' example)) + assert_xpath %(//p[text()="\\'single quoted string'"]), convert_string_to_embedded(%(\\'single quoted string')) end test "emphasized text at end of line" do - assert_xpath "//em", render_string("This library is _awesome_") + assert_xpath "//em", convert_string("This library is _awesome_") end test "emphasized text at beginning of line" do - assert_xpath "//em", render_string("_drop_ it") + assert_xpath "//em", convert_string("_drop_ it") end test "emphasized text across line" do - assert_xpath "//em", render_string("_check it_") + assert_xpath "//em", convert_string("_check it_") end test "unquoted text" do - refute_match(/#/, render_string("An #unquoted# word")) + refute_match(/#/, convert_string("An #unquoted# word")) end test 'backticks and straight quotes in text' do backslash = '\\' - assert_equal %q(run foo dog), render_embedded_string(%q(run `foo` 'dog'), :doctype => :inline, :attributes => {'compat-mode' => ''}) - assert_equal %q(run foo 'dog'), render_embedded_string(%q(run `foo` 'dog'), :doctype => :inline) - assert_equal %q(run `foo` 'dog'), render_embedded_string(%(run #{backslash}`foo` 'dog'), :doctype => :inline) - assert_equal %q(run ‘foo` 'dog’), render_embedded_string(%q(run '`foo` 'dog`'), :doctype => :inline) - assert_equal %q(run '`foo` 'dog`'), render_embedded_string(%(run #{backslash}'`foo` 'dog#{backslash}`'), :doctype => :inline) + assert_equal %q(run foo dog), convert_inline_string(%q(run `foo` 'dog'), attributes: { 'compat-mode' => '' }) + assert_equal %q(run foo 'dog'), convert_inline_string(%q(run `foo` 'dog')) + assert_equal %q(run `foo` 'dog'), convert_inline_string(%(run #{backslash}`foo` 'dog')) + assert_equal %q(run ‘foo` 'dog’), convert_inline_string(%q(run '`foo` 'dog`')) + assert_equal %q(run '`foo` 'dog`'), convert_inline_string(%(run #{backslash}'`foo` 'dog#{backslash}`')) end test 'plus characters inside single plus passthrough' do - assert_xpath '//p[text()="+"]', render_embedded_string('+++') - assert_xpath '//p[text()="+="]', render_embedded_string('++=+') + assert_xpath '//p[text()="+"]', convert_string_to_embedded('+++') + assert_xpath '//p[text()="+="]', convert_string_to_embedded('++=+') end test 'plus passthrough escapes entity reference' do - assert_match(/&#44;/, render_embedded_string('+,+')) - assert_match(/one&#44;two/, render_embedded_string('one++,++two')) + assert_match(/&#44;/, convert_string_to_embedded('+,+')) + assert_match(/one&#44;two/, convert_string_to_embedded('one++,++two')) end context "basic styling" do setup do - @rendered = render_string("A *BOLD* word. An _italic_ word. A `mono` word. ^superscript!^ and some ~subscript~.") + @output = convert_string("A *BOLD* word. An _italic_ word. A `mono` word. 
^superscript!^ and some ~subscript~.") end test "strong" do - assert_xpath "//strong", @rendered, 1 + assert_xpath "//strong", @output, 1 end test "italic" do - assert_xpath "//em", @rendered, 1 + assert_xpath "//em", @output, 1 end test "monospaced" do - assert_xpath "//code", @rendered, 1 + assert_xpath "//code", @output, 1 end test "superscript" do - assert_xpath "//sup", @rendered, 1 + assert_xpath "//sup", @output, 1 end test "subscript" do - assert_xpath "//sub", @rendered, 1 + assert_xpath "//sub", @output, 1 end test "passthrough" do - assert_xpath "//code", render_string("This is +passed through+."), 0 - assert_xpath "//code", render_string("This is +passed through and monospaced+.", :attributes => {'compat-mode' => ''}), 1 + assert_xpath "//code", convert_string("This is +passed through+."), 0 + assert_xpath "//code", convert_string("This is +passed through and monospaced+.", attributes: { 'compat-mode' => '' }), 1 end test "nested styles" do - rendered = render_string("Winning *big _time_* in the +city *boyeeee*+.", :attributes => {'compat-mode' => ''}) + output = convert_string("Winning *big _time_* in the +city *boyeeee*+.", attributes: { 'compat-mode' => '' }) - assert_xpath "//strong/em", rendered - assert_xpath "//code/strong", rendered + assert_xpath "//strong/em", output + assert_xpath "//code/strong", output - rendered = render_string("Winning *big _time_* in the `city *boyeeee*`.") + output = convert_string("Winning *big _time_* in the `city *boyeeee*`.") - assert_xpath "//strong/em", rendered - assert_xpath "//code/strong", rendered + assert_xpath "//strong/em", output + assert_xpath "//code/strong", output end - test "unconstrained quotes" do - rendered_chars = render_string("**B**__I__++M++", :attributes => {'compat-mode' => ''}) - assert_xpath "//strong", rendered_chars - assert_xpath "//em", rendered_chars - assert_xpath "//code", rendered_chars + test 'unconstrained quotes' do + output = convert_string('**B**__I__++M++[role]++M++', attributes: { 'compat-mode' => '' }) + assert_xpath '//strong', output, 1 + assert_xpath '//em', output, 1 + assert_xpath '//code[not(@class)]', output, 1 + assert_xpath '//code[@class="role"]', output, 1 - rendered_chars = render_string("**B**__I__``M``") - assert_xpath "//strong", rendered_chars - assert_xpath "//em", rendered_chars - assert_xpath "//code", rendered_chars + output = convert_string('**B**__I__``M``[role]``M``') + assert_xpath '//strong', output, 1 + assert_xpath '//em', output, 1 + assert_xpath '//code[not(@class)]', output, 1 + assert_xpath '//code[@class="role"]', output, 1 end end test 'should format Asian characters as words' do - assert_xpath '//strong', (render_embedded_string 'bold *要* bold') - assert_xpath '//strong', (render_embedded_string 'bold *素* bold') - assert_xpath '//strong', (render_embedded_string 'bold *要素* bold') + assert_xpath '//strong', (convert_string_to_embedded 'bold *要* bold') + assert_xpath '//strong', (convert_string_to_embedded 'bold *素* bold') + assert_xpath '//strong', (convert_string_to_embedded 'bold *要素* bold') end end diff -Nru asciidoctor-1.5.5/.travis.yml asciidoctor-2.0.10/.travis.yml --- asciidoctor-1.5.5/.travis.yml 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/.travis.yml 2019-08-18 16:11:54.000000000 +0000 @@ -1,20 +1,33 @@ -sudo: false +dist: trusty git: - depth: 1 + # use depth 2 just in case two refs get pushed at once (like a tag) + depth: 2 language: ruby rvm: - - 2.3.1 - - 2.2.5 - - 2.1.10 - - 2.0.0 - - 1.9.3 - - 1.8.7 - - jruby-9.0.5.0 - - jruby-9.1.2.0 - 
- jruby-19mode # based on jruby-1.7.19 - - jruby-18mode # based on jruby-1.7.19 - #- rbx-3.60 # NOTE not working currently +- &release_ruby 2.6.3 +- 2.5.5 +- 2.4.6 +- 2.3.8 +- jruby-9.2.7.0 +- jruby-9.1.17.0 +# the test suite currently crashes on truffleruby +#- truffleruby-rc13 +env: + global: + # use system libraries to speed up installation of nokogiri + - NOKOGIRI_USE_SYSTEM_LIBRARIES=true + - PYGMENTS=true + - SOURCE_DATE_EPOCH=1521504000 script: bundle exec rake coverage test:all -notifications: - email: false - #irc: 'irc.freenode.org#asciidoctor' +after_success: bundle exec rake build:dependents +#notifications: +# email: false +# irc: 'irc.freenode.org#asciidoctor' +deploy: + provider: rubygems + gem: asciidoctor + api_key: ${RUBYGEMS_API_KEY} + on: + tags: true + repo: asciidoctor/asciidoctor + rvm: *release_ruby diff -Nru asciidoctor-1.5.5/.yardopts asciidoctor-2.0.10/.yardopts --- asciidoctor-1.5.5/.yardopts 2016-10-05 08:51:24.000000000 +0000 +++ asciidoctor-2.0.10/.yardopts 2019-08-18 16:11:54.000000000 +0000 @@ -1,5 +1,11 @@ ---exclude opal_ext +--charset UTF-8 +--readme README.adoc --hide-api private --plugin tomdoc --title "Asciidoctor API Documentation" --output-dir rdoc +lib/**/*.rb +- +CHANGELOG.adoc +CONTRIBUTING.adoc +LICENSE
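(Editorial aside, not part of the patch.) The rewritten tests above rely on the Asciidoctor 2.x helper names (convert_string_to_embedded, using_memory_logger, assert_message). For readers who want to reproduce the same behavior outside the test harness, here is a rough sketch using only the public 2.x API; it assumes the asciidoctor 2.x gem is installed. Note that the standalone: false option replaces the 1.5.x header_footer: false option, and Asciidoctor::MemoryLogger captures the warnings that would otherwise be written to stderr.

  # Illustrative sketch only; not part of the diff above.
  require 'asciidoctor'

  # an intentionally unterminated table, mirroring the input used by the
  # 'should warn if table block is not terminated' test
  input = <<~'EOS'
  outside

  |===
  |
  inside
  EOS

  memory_logger = Asciidoctor::MemoryLogger.new
  original_logger = Asciidoctor::LoggerManager.logger
  Asciidoctor::LoggerManager.logger = memory_logger
  begin
    # standalone: false converts just the document body (header_footer: false in 1.5.x)
    html = Asciidoctor.convert input, safe: :safe, standalone: false
  ensure
    Asciidoctor::LoggerManager.logger = original_logger
  end

  puts html
  memory_logger.messages.each do |record|
    # each record is a hash with :severity and :message; :message may be a plain
    # String or a structured message that carries a source location
    warn %(#{record[:severity]}: #{record[:message]})
  end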