Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
jackdewinter authored Nov 25, 2024
1 parent 7111ba4 commit 6c8bf89
Show file tree
Hide file tree
Showing 21 changed files with 367 additions and 191 deletions.
4 changes: 4 additions & 0 deletions newdocs/src/changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@
- [Issue 1270](https://github.com/jackdewinter/pymarkdown/issues/1270)
- Fixed issue with Md027 not reporting line numbers properly within
anything except the first paragraph.
- [Issue 1272](https://github.com/jackdewinter/pymarkdown/issues/1272)
- Parsing of the fenced code block in certain cases was incorrect, as
was the text token containing the code block's text. This resulted in
the reported columns being indented less than expected.

<!--- pyml disable-next-line no-duplicate-heading-->
### Changed
Expand Down
8 changes: 4 additions & 4 deletions publish/coverage.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,12 @@
"projectName": "pymarkdown",
"reportSource": "pytest",
"branchLevel": {
"totalMeasured": 5465,
"totalCovered": 5465
"totalMeasured": 5489,
"totalCovered": 5489
},
"lineLevel": {
"totalMeasured": 21308,
"totalCovered": 21308
"totalMeasured": 21362,
"totalCovered": 21362
}
}

6 changes: 3 additions & 3 deletions publish/test-results.json
Original file line number Diff line number Diff line change
Expand Up @@ -1303,7 +1303,7 @@
"totalTests": 128,
"failedTests": 0,
"errorTests": 0,
"skippedTests": 4,
"skippedTests": 0,
"elapsedTimeInMilliseconds": 0
},
{
Expand Down Expand Up @@ -1620,10 +1620,10 @@
},
{
"name": "test.test_markdown_extra",
"totalTests": 262,
"totalTests": 263,
"failedTests": 0,
"errorTests": 0,
"skippedTests": 5,
"skippedTests": 3,
"elapsedTimeInMilliseconds": 0
},
{
Expand Down
109 changes: 85 additions & 24 deletions pymarkdown/inline/inline_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@
from pymarkdown.inline.inline_handler_helper import InlineHandlerHelper
from pymarkdown.inline.inline_helper import InlineHelper
from pymarkdown.inline.inline_text_block_helper import InlineTextBlockHelper
from pymarkdown.plugins.utils.leading_space_index_tracker import (
LeadingSpaceIndexTracker,
)
from pymarkdown.tokens.atx_heading_markdown_token import AtxHeadingMarkdownToken
from pymarkdown.tokens.block_quote_markdown_token import BlockQuoteMarkdownToken
from pymarkdown.tokens.list_start_markdown_token import ListStartMarkdownToken
Expand Down Expand Up @@ -69,13 +72,35 @@ def parse_inline(
else:
POGGER.info("-->not bq-")

lsi_tracker = LeadingSpaceIndexTracker()
token = coalesced_results[0]
if token.is_block_quote_start or token.is_list_start:
lsi_tracker.open_container(token)
# if token.is_block_quote_end or token.is_list_end:
assert not (token.is_block_quote_end or token.is_list_end)
# lsi_tracker.register_container_end(token)
# elif
lsi_tracker.track_since_last_non_end_token(token)
for coalesce_index in range(1, len(coalesced_results)):

token = coalesced_results[coalesce_index]

if not token.is_end_token or token.is_end_of_stream:
while lsi_tracker.have_any_registered_container_ends():
lsi_tracker.process_container_end(token)
if token.is_block_quote_start or token.is_list_start:
lsi_tracker.open_container(token)
elif token.is_block_quote_end or token.is_list_end:
lsi_tracker.register_container_end(token)
lsi_tracker.track_since_last_non_end_token(token)

InlineProcessor.__process_next_coalesce_item(
coalesced_results,
coalesce_index,
coalesced_list,
coalesced_stack,
parse_properties,
lsi_tracker,
)
return coalesced_list

Expand Down Expand Up @@ -131,6 +156,7 @@ def __process_next_coalesce_item(
coalesced_list: List[MarkdownToken],
coalesced_stack: List[MarkdownToken],
parse_properties: ParseBlockPassProperties,
lsi_tracker: LeadingSpaceIndexTracker,
) -> None:
POGGER.info("coalesced_results:$<", coalesced_list[-1])
POGGER.info("coalesced_stack:$<", coalesced_stack)
Expand All @@ -140,6 +166,7 @@ def __process_next_coalesce_item(
POGGER.info(
"$-->last->block->$", i, block_quote_token.leading_text_index
)

if coalesced_results[coalesce_index].is_text and (
coalesced_list[-1].is_paragraph
or coalesced_list[-1].is_setext_heading
Expand All @@ -148,7 +175,11 @@ def __process_next_coalesce_item(
):
if coalesced_list[-1].is_code_block:
processed_tokens = InlineProcessor.__parse_code_block(
coalesced_results, coalesce_index, coalesced_list, coalesced_stack
coalesced_results,
coalesce_index,
coalesced_list,
coalesced_stack,
lsi_tracker,
)
elif coalesced_list[-1].is_setext_heading:
processed_tokens = InlineProcessor.__parse_setext_heading(
Expand Down Expand Up @@ -290,47 +321,77 @@ def __parse_setext_heading(
)
return processed_tokens

@staticmethod
def __parse_code_block_coalesced(
    coalesced_stack: List[MarkdownToken],
    lsi_tracker: LeadingSpaceIndexTracker,
    text_token: TextMarkdownToken,
) -> int:
    """
    Compute the 1-based column number for a code block's text token by
    adding the length of the leading space recorded for the innermost
    open container and, when that container is a list nested inside a
    block quote, the enclosing block quote's bleading space as well.
    """
    column_number = 1
    innermost = coalesced_stack[-1]
    if innermost.is_block_quote_start:
        bq_token = cast(BlockQuoteMarkdownToken, innermost)
        assert bq_token.bleading_spaces, "Bleading spaces must be defined."
        leading_parts = bq_token.bleading_spaces.split(
            ParserHelper.newline_character
        )
        leading_index = lsi_tracker.get_tokens_block_quote_bleading_space_index(
            text_token
        )
    else:
        list_start_token = cast(ListStartMarkdownToken, innermost)
        assert (
            list_start_token.leading_spaces is not None
        ), "Leading spaces must be defined."
        leading_parts = list_start_token.leading_spaces.split(
            ParserHelper.newline_character
        )
        leading_index = lsi_tracker.get_tokens_list_leading_space_index(text_token)
    # Guard: the tracked index may point past the recorded leading spaces.
    if leading_index < len(leading_parts):
        column_number += len(leading_parts[leading_index])
    if innermost.is_list_start:
        # Walk outward past any directly-nested lists to the nearest
        # enclosing block quote; its bleading space also offsets the column.
        outer_index = len(coalesced_stack) - 2
        while outer_index >= 0 and coalesced_stack[outer_index].is_list_start:
            outer_index -= 1
        if outer_index >= 0:
            assert coalesced_stack[outer_index].is_block_quote_start
            outer_bq_token = cast(
                BlockQuoteMarkdownToken, coalesced_stack[outer_index]
            )
            assert (
                outer_bq_token.bleading_spaces
            ), "Bleading spaces must be defined."
            leading_parts = outer_bq_token.bleading_spaces.split(
                ParserHelper.newline_character
            )
            leading_index = lsi_tracker.get_tokens_list_leading_space_index(
                text_token
            )
            column_number += len(leading_parts[leading_index])
    return column_number

@staticmethod
def __parse_code_block(
coalesced_results: List[MarkdownToken],
coalesce_index: int,
coalesced_list: List[MarkdownToken],
coalesced_stack: List[MarkdownToken],
lsi_tracker: LeadingSpaceIndexTracker,
) -> List[MarkdownToken]:
assert coalesced_results[
coalesce_index
].is_text, "Coalesced tokens must be text."
text_token = cast(TextMarkdownToken, coalesced_results[coalesce_index])
encoded_text = InlineHelper.append_text("", text_token.token_text)
if coalesced_list[-1].is_fenced_code_block:
line_number_delta, new_column_number = 1, 1
line_number_delta = 1

# POGGER.info("coalesced_stack:$<", coalesced_stack)
if coalesced_stack:
if coalesced_stack[-1].is_block_quote_start:
block_quote_token = cast(
BlockQuoteMarkdownToken, coalesced_stack[-1]
)
assert (
block_quote_token.bleading_spaces
), "Bleading spaces must be defined."
split_leading_spaces = block_quote_token.bleading_spaces.split(
ParserHelper.newline_character
)
else:
list_token = cast(ListStartMarkdownToken, coalesced_stack[-1])
assert (
list_token.leading_spaces is not None
), "Leading spaces must be defined."
split_leading_spaces = list_token.leading_spaces.split(
ParserHelper.newline_character
)
new_column_number += (
(len(split_leading_spaces[1]))
if len(split_leading_spaces) >= 2
else (len(split_leading_spaces[0]))
new_column_number = InlineProcessor.__parse_code_block_coalesced(
coalesced_stack, lsi_tracker, text_token
)
else:
new_column_number = 1
leading_whitespace = text_token.extracted_whitespace
# POGGER.debug(">>$<<", text_token)
assert (
Expand Down
53 changes: 53 additions & 0 deletions pymarkdown/leaf_blocks/fenced_leaf_block_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,6 +381,48 @@ def __calculate_fenced_vars(
assert fence_string in original_line, "fence_string must be in original line"
return after_fence_index, adj_end, fence_string

@staticmethod
def __process_fenced_start_adjust(
    parser_state: ParserState,
    position_marker: PositionMarker,
    new_tokens: List[MarkdownToken],
) -> None:
    """
    Rebuild the just-emitted fenced code block token with its column
    number shifted by the enclosing list's indent (if any), rebind the
    stack entry to the rebuilt token, and swap it into new_tokens.
    """
    enclosing_index = len(parser_state.token_stack) - 2
    enclosing_stack_token = parser_state.token_stack[enclosing_index]
    if enclosing_stack_token.is_list:
        # Shift by how far the list's indent extends past the already
        # consumed indent of the current position.
        column_delta = (
            cast(ListStackToken, enclosing_stack_token).indent_level
            - position_marker.index_indent
        )
    else:
        column_delta = 0
    stale_fenced_token = cast(FencedCodeBlockMarkdownToken, new_tokens[-1])
    assert stale_fenced_token.is_fenced_code_block
    adjusted_position_marker = PositionMarker(
        line_number=stale_fenced_token.line_number,
        index_number=stale_fenced_token.column_number - 1 + column_delta,
        text_to_parse="",
        index_indent=0,
    )
    # Markdown tokens are immutable with respect to position, so create a
    # replacement carrying every original field plus the adjusted position.
    replacement_fenced_token = FencedCodeBlockMarkdownToken(
        fence_character=stale_fenced_token.fence_character,
        fence_count=stale_fenced_token.fence_count,
        extracted_text=stale_fenced_token.extracted_text,
        pre_extracted_text=stale_fenced_token.pre_extracted_text,
        text_after_extracted_text=stale_fenced_token.text_after_extracted_text,
        pre_text_after_extracted_text=stale_fenced_token.pre_text_after_extracted_text,
        extracted_whitespace=stale_fenced_token.extracted_whitespace,
        extracted_whitespace_before_info_string=stale_fenced_token.extracted_whitespace_before_info_string,
        position_marker=adjusted_position_marker,
    )
    parser_state.token_stack[-1].reset_matching_markdown_token(
        replacement_fenced_token
    )
    # Replace the stale token in place so callers holding a reference to
    # new_tokens observe the swap.
    new_tokens[-1] = replacement_fenced_token

# pylint: disable=too-many-arguments
@staticmethod
def __process_fenced_start(
Expand Down Expand Up @@ -422,6 +464,7 @@ def __process_fenced_start(
grab_bag,
)

POGGER.debug("new_tokens-->$<<", new_tokens)
POGGER.debug("StackToken-->$<<", parser_state.token_stack[-1])
POGGER.debug(
"StackToken>start_markdown_token-->$<<",
Expand All @@ -433,6 +476,7 @@ def __process_fenced_start(
if adjusted_corrected_prefix is not None
else None
)
before_correct_length = len(new_tokens)
LeafBlockHelper.correct_for_leaf_block_start_in_list(
parser_state,
position_marker.index_indent,
Expand All @@ -442,6 +486,15 @@ def __process_fenced_start(
alt_removed_chars_at_start=removed_char_length,
original_line=original_line,
)

# TODO restructure this so we do not have to do this kludge
# need to place the correct_for_leaf_block_start_in_list call before
# we create the fenced token
POGGER.debug("new_tokens-->$<<", new_tokens)
if before_correct_length != len(new_tokens):
FencedLeafBlockProcessor.__process_fenced_start_adjust(
parser_state, position_marker, new_tokens
)
return new_tokens

# pylint: enable=too-many-arguments
Expand Down
5 changes: 5 additions & 0 deletions pymarkdown/plugins/rule_md_012.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,11 @@ def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:
if token.is_block_quote_end or token.is_list_end:
self.__process_pending_container_end_fixes(context, token)

if not token.is_end_token or token.is_end_of_stream:
while (
self.__leading_space_index_tracker.have_any_registered_container_ends()
):
self.__leading_space_index_tracker.process_container_end(token)
if token.is_block_quote_start or token.is_list_start:
self.__leading_space_index_tracker.open_container(token)
elif token.is_block_quote_end or token.is_list_end:
Expand Down
40 changes: 21 additions & 19 deletions pymarkdown/plugins/rule_md_031.py
Original file line number Diff line number Diff line change
Expand Up @@ -576,26 +576,28 @@ def __fix_spacing_with_else(
self.register_replace_tokens_request(context, token, token, replacement_tokens)

def __calc_kludge_one(self, at_least_one_container: bool) -> bool:
is_kludge_one = False
if at_least_one_container:
last_stack_token = (
self.__leading_space_index_tracker.get_container_stack_item(-1)
)
if last_stack_token.is_list_start and self.__x1[-1].is_block_quote_start:
is_kludge_one = not any(i.is_block_quote_start for i in self.__x1[:-1])
return is_kludge_one
if not at_least_one_container:
return False
last_stack_token = self.__leading_space_index_tracker.get_container_stack_item(
-1
)
return (
not any(i.is_block_quote_start for i in self.__x1[:-1])
if last_stack_token.is_list_start and self.__x1[-1].is_block_quote_start
else False
)

def __calc_2(self, context: PluginScanContext, did_process_removals: bool) -> bool:
# def __calc_2(self, context: PluginScanContext, did_process_removals: bool) -> bool:

# This will most likely need rewriting for deeper nestings.
if (
not did_process_removals
and len(self.__x1) == 2
and self.__x1[0].is_block_quote_start
and self.__x1[1].is_list_start
):
did_process_removals = self.__apply_tailing_block_quote_fix(0, context)
return did_process_removals
# This will most likely need rewriting for deeper nestings.
# if (
# not did_process_removals
# and len(self.__x1) == 2
# and self.__x1[0].is_block_quote_start
# and self.__x1[1].is_list_start
# ):
# did_process_removals = self.__apply_tailing_block_quote_fix(0, context)
# return did_process_removals

def __calc_3(
self,
Expand Down Expand Up @@ -683,7 +685,7 @@ def __fix_spacing(
or not at_least_one_container
)

did_process_removals = self.__calc_2(context, did_process_removals)
# did_process_removals = self.__calc_2(context, did_process_removals)
did_process_removals, upgrade_kludge = self.__calc_3(
context, did_process_removals, at_least_one_container, upgrade_kludge
)
Expand Down
10 changes: 5 additions & 5 deletions pymarkdown/plugins/utils/leading_space_index_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,11 +206,11 @@ def get_tokens_list_leading_space_index(
container_index = alternate_index
last_closed_container_info = self.get_closed_container_info(-1)

assert not last_closed_container_info.adjustment
# if last_closed_container_info.adjustment:
# adjust = 2 if container_index >= 0 else 1
# else:
adjust = self.__calculate_adjust(initial_index, container_index)
# assert not last_closed_container_info.adjustment
if last_closed_container_info.adjustment:
adjust = 2 if container_index >= 0 else 1
else:
adjust = self.__calculate_adjust(initial_index, container_index)
# endif

index = (
Expand Down
Loading

0 comments on commit 6c8bf89

Please sign in to comment.