Close #777. This is on by default for 0.10+. Setting "CMakeLists.txt" explicitly, though, will force it to be found, while the default will fall back to 3.15+.

Signed-off-by: Henry Schreiner <[email protected]>
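A hedged sketch of the setting this enables (the option name, `cmake.version` under `[tool.scikit-build]`, is scikit-build-core's documented knob but is an assumption here, since the commit message does not name it):

```toml
[tool.scikit-build]
# Force the minimum CMake version to be read from CMakeLists.txt; leaving
# the option unset lets 0.10+ try CMakeLists.txt and fall back to 3.15+.
cmake.version = "CMakeLists.txt"
```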
Showing 17 changed files with 647 additions and 14 deletions.
New Sphinx API docs for the `scikit_build_core.ast` package (26 lines added):

```rst
scikit\_build\_core.ast package
===============================

.. automodule:: scikit_build_core.ast
   :members:
   :undoc-members:
   :show-inheritance:

Submodules
----------

scikit\_build\_core.ast.ast module
----------------------------------

.. automodule:: scikit_build_core.ast.ast
   :members:
   :undoc-members:
   :show-inheritance:

scikit\_build\_core.ast.tokenizer module
----------------------------------------

.. automodule:: scikit_build_core.ast.tokenizer
   :members:
   :undoc-members:
   :show-inheritance:
```
The new package `__init__.py` (3 lines added):

```python
from __future__ import annotations

__all__: list[str] = []
```
`scikit_build_core/ast/ast.py`, the parser (95 lines added):

```python
from __future__ import annotations

import dataclasses
import sys
from pathlib import Path
from typing import TYPE_CHECKING

from .._logging import rich_print
from .tokenizer import Token, TokenType, tokenize

if TYPE_CHECKING:
    from collections.abc import Generator

__all__ = ["Node", "Block", "parse"]


def __dir__() -> list[str]:
    return __all__


@dataclasses.dataclass(frozen=True)
class Node:
    __slots__ = ("name", "value", "start", "stop")

    name: str
    value: str
    start: int
    stop: int

    def __str__(self) -> str:
        return f"{self.name}({self.value})"


@dataclasses.dataclass(frozen=True)
class Block(Node):
    __slots__ = ("contents",)

    contents: list[Node]

    def __str__(self) -> str:
        return f"{super().__str__()} ... {len(self.contents)} children"


def parse(
    tokens: Generator[Token, None, None], stop: str = ""
) -> Generator[Node, None, None]:
    """
    Generate a stream of nodes from a stream of tokens. This currently
    bundles all block-like functions into a single `Block` node, but this
    could be changed to be more specific eventually if needed.
    """
    try:
        while True:
            token = next(tokens)
            if token.type != TokenType.UNQUOTED:
                continue
            name = token.value.lower()
            start = token.start
            token = next(tokens)
            if token.type == TokenType.WHITESPACE:
                token = next(tokens)
            if token.type != TokenType.OPEN_PAREN:
                msg = f"Expected open paren after {name!r}, got {token!r}"
                raise AssertionError(msg)
            count = 1
            value = ""
            while True:
                token = next(tokens)
                if token.type == TokenType.OPEN_PAREN:
                    count += 1
                elif token.type == TokenType.CLOSE_PAREN:
                    count -= 1
                    if count == 0:
                        break
                value += token.value

            if name in {"if", "foreach", "while", "macro", "function", "block"}:
                contents = list(parse(tokens, f"end{name}"))
                yield Block(name, value, start, contents[-1].stop, contents)
            else:
                yield Node(name, value, start, token.stop)
            if stop and name == stop:
                break
    except StopIteration:
        pass


if __name__ == "__main__":
    with Path(sys.argv[1]).open(encoding="utf-8-sig") as f:
        for node in parse(tokenize(f.read())):
            cnode = dataclasses.replace(
                node,
                name=f"[bold blue]{node.name}[/bold blue]",
                value=f"[green]{node.value}[/green]",
            )
            rich_print(cnode)
```
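A minimal usage sketch of the parser (the CMake snippet is an invented example; assumes the modules above are importable from an installed scikit-build-core):

```python
from scikit_build_core.ast.ast import Block, parse
from scikit_build_core.ast.tokenizer import tokenize

content = """\
cmake_minimum_required(VERSION 3.15...3.26)
project(demo LANGUAGES CXX)
if(WIN32)
  message(STATUS "building on Windows")
endif()
"""

for node in parse(tokenize(content)):
    if isinstance(node, Block):
        # Block-like commands (if/foreach/while/macro/function/block) come
        # back as a single Block whose .contents holds the nested nodes,
        # ending with the closing command (endif here).
        print(node.name, "->", [str(child) for child in node.contents])
    else:
        print(node)
```

The `__main__` block above also makes the module directly runnable: `python -m scikit_build_core.ast.ast path/to/CMakeLists.txt` pretty-prints the node stream.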
`scikit_build_core/ast/tokenizer.py` (81 lines added):

```python
from __future__ import annotations

import dataclasses
import enum
import re
import sys
from pathlib import Path
from typing import TYPE_CHECKING

from .._logging import rich_print

if TYPE_CHECKING:
    from collections.abc import Generator

__all__ = ["Token", "TokenType", "tokenize"]


def __dir__() -> list[str]:
    return __all__


TOKEN_EXPRS = {
    "BRACKET_COMMENT": r"\s*#\[(?P<bc1>=*)\[(?s:.)*?\](?P=bc1)\]",
    "COMMENT": r"#.*$",
    "QUOTED": r'"(?:\\(?s:.)|[^"\\])*?"',
    "BRACKET_QUOTE": r"\[(?P<bq1>=*)\[(?s:.)*?\](?P=bq1)\]",
    "OPEN_PAREN": r"\(",
    "CLOSE_PAREN": r"\)",
    "LEGACY": r'\b\w+=[^\s"()$\\]*(?:"[^"\\]*"[^\s"()$\\]*)*|"(?:[^"\\]*(?:\\.[^"\\]*)*)*"',
    "UNQUOTED": r"(?:\\.|[^\s()#\"\\])+",
}


class TokenType(enum.Enum):
    BRACKET_COMMENT = enum.auto()
    COMMENT = enum.auto()
    UNQUOTED = enum.auto()
    QUOTED = enum.auto()
    BRACKET_QUOTE = enum.auto()
    LEGACY = enum.auto()
    OPEN_PAREN = enum.auto()
    CLOSE_PAREN = enum.auto()
    WHITESPACE = enum.auto()


@dataclasses.dataclass(frozen=True)
class Token:
    __slots__ = ("type", "start", "stop", "value")

    type: TokenType
    start: int
    stop: int
    value: str

    def __str__(self) -> str:
        return f"{self.type.name}({self.value!r})"


def tokenize(contents: str) -> Generator[Token, None, None]:
    tok_regex = "|".join(f"(?P<{n}>{v})" for n, v in TOKEN_EXPRS.items())
    last = 0
    for match in re.finditer(tok_regex, contents, re.MULTILINE):
        for typ, value in match.groupdict().items():
            if typ in TOKEN_EXPRS and value is not None:
                if match.start() != last:
                    yield Token(
                        TokenType.WHITESPACE,
                        last,
                        match.start(),
                        contents[last : match.start()],
                    )
                last = match.end()
                yield Token(TokenType[typ], match.start(), match.end(), value)


if __name__ == "__main__":
    with Path(sys.argv[1]).open(encoding="utf-8-sig") as f:
        for token in tokenize(f.read()):
            rich_print(
                f"[green]{token.type.name}[/green][red]([/red]{token.value}[red])[/red]"
            )
```
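The token stream itself can be inspected the same way (made-up input; the expected output is shown as comments):

```python
from scikit_build_core.ast.tokenizer import tokenize

for token in tokenize('set(FOO "bar")  # a comment'):
    print(token)

# UNQUOTED('set')
# OPEN_PAREN('(')
# UNQUOTED('FOO')
# WHITESPACE(' ')
# QUOTED('"bar"')
# CLOSE_PAREN(')')
# WHITESPACE('  ')
# COMMENT('# a comment')
```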
The new `find_min_cmake_version` helper (27 lines added; its relative imports show it lives elsewhere in `scikit_build_core`):

```python
from __future__ import annotations

from ..ast.ast import parse
from ..ast.tokenizer import tokenize

__all__ = ["find_min_cmake_version"]


def __dir__() -> list[str]:
    return __all__


def find_min_cmake_version(cmake_content: str) -> str | None:
    """
    Locate the minimum required version. Return None if not found.
    """
    for node in parse(tokenize(cmake_content)):
        if node.name == "cmake_minimum_required":
            return (
                node.value.replace("VERSION", "")
                .replace("FATAL_ERROR", "")
                .split("...")[0]
                .strip()
                .strip("\"'[]=")
            )

    return None
```
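A quick sanity check of the helper, assuming `find_min_cmake_version` from the file above is in scope (the input is a made-up example):

```python
content = 'cmake_minimum_required(VERSION 3.15...3.26 FATAL_ERROR)'
# The parsed node's value is "VERSION 3.15...3.26 FATAL_ERROR"; stripping
# the keywords, taking the lower bound of the range, and trimming stray
# whitespace/quotes/brackets leaves the version string.
print(find_min_cmake_version(content))  # -> 3.15
```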