summaryrefslogtreecommitdiff
path: root/mesonbuild/scripts/depscan.py
diff options
context:
space:
mode:
authorDylan Baker <dylan@pnwbakers.com>2024-03-11 12:35:25 -0700
committerDylan Baker <dylan@pnwbakers.com>2025-04-03 12:27:07 -0700
commitea344be9b017042fa206cb12e9fee95c1c22fae5 (patch)
treec2e064ea5ffd4ff7e6dae5d8a333af1172b6db94 /mesonbuild/scripts/depscan.py
parentcc815c4bcac055721ae359cbc757f50c10ed54ed (diff)
downloadmeson-ea344be9b017042fa206cb12e9fee95c1c22fae5.tar.gz
backend/ninja: use a two step process for dependency scanning
This splits the scanner into two discrete steps, one that scans the source files, and one that reads in the dependency information and produces a dyndep. The scanner uses the JSON format from https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p1689r5.html, which is the same format that MSVC and Clang use for C++ modules scanning. This will allow us to more easily move to using MSVC and clang-scan-deps when possible. As an added bonus, this correctly tracks dependencies across TU and Target boundaries, unlike the previous implementation, which assumed that if it couldn't find a provider that everything was good, but could run into issues. Because of that limitation Fortran code had to fully depend on all of its dependencies, transitive or not. Now, when using the dep scanner, we can remove that restriction, allowing more parallelism.
Diffstat (limited to 'mesonbuild/scripts/depscan.py')
-rw-r--r--mesonbuild/scripts/depscan.py133
1 files changed, 84 insertions, 49 deletions
diff --git a/mesonbuild/scripts/depscan.py b/mesonbuild/scripts/depscan.py
index 44e805447..6bd5cde9a 100644
--- a/mesonbuild/scripts/depscan.py
+++ b/mesonbuild/scripts/depscan.py
@@ -1,22 +1,60 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 The Meson development team
-# Copyright © 2023 Intel Corporation
+# Copyright © 2023-2024 Intel Corporation
from __future__ import annotations
import collections
+import json
import os
import pathlib
import pickle
import re
import typing as T
-from ..backend.ninjabackend import ninja_quote
-
if T.TYPE_CHECKING:
- from typing_extensions import Literal
+ from typing_extensions import Literal, TypedDict, NotRequired
from ..backend.ninjabackend import TargetDependencyScannerInfo
+ Require = TypedDict(
+ 'Require',
+ {
+ 'logical-name': str,
+ 'compiled-module-path': NotRequired[str],
+ 'source-path': NotRequired[str],
+ 'unique-on-source-path': NotRequired[bool],
+ 'lookup-method': NotRequired[Literal['by-name', 'include-angle', 'include-quote']]
+ },
+ )
+
+ Provide = TypedDict(
+ 'Provide',
+ {
+ 'logical-name': str,
+ 'compiled-module-path': NotRequired[str],
+ 'source-path': NotRequired[str],
+ 'unique-on-source-path': NotRequired[bool],
+ 'is-interface': NotRequired[bool],
+ },
+ )
+
+ Rule = TypedDict(
+ 'Rule',
+ {
+ 'primary-output': NotRequired[str],
+ 'outputs': NotRequired[T.List[str]],
+ 'provides': NotRequired[T.List[Provide]],
+ 'requires': NotRequired[T.List[Require]],
+ }
+ )
+
+ class Description(TypedDict):
+
+ version: int
+ revision: int
+ rules: T.List[Rule]
+
+
CPP_IMPORT_RE = re.compile(r'\w*import ([a-zA-Z0-9]+);')
CPP_EXPORT_RE = re.compile(r'\w*export module ([a-zA-Z0-9]+);')
@@ -37,7 +75,7 @@ class DependencyScanner:
self.sources = self.target_data.sources
self.provided_by: T.Dict[str, str] = {}
self.exports: T.Dict[str, str] = {}
- self.needs: collections.defaultdict[str, T.List[str]] = collections.defaultdict(list)
+ self.imports: collections.defaultdict[str, T.List[str]] = collections.defaultdict(list)
self.sources_with_exports: T.List[str] = []
def scan_file(self, fname: str, lang: Literal['cpp', 'fortran']) -> None:
@@ -58,7 +96,7 @@ class DependencyScanner:
# In Fortran you have an using declaration also for the module
# you define in the same file. Prevent circular dependencies.
if needed not in modules_in_this_file:
- self.needs[fname].append(needed)
+ self.imports[fname].append(needed)
if export_match:
exported_module = export_match.group(1).lower()
assert exported_module not in modules_in_this_file
@@ -89,7 +127,7 @@ class DependencyScanner:
# submodule (a1:a2) a3 <- requires a1@a2.smod
#
# a3 does not depend on the a1 parent module directly, only transitively.
- self.needs[fname].append(parent_module_name_full)
+ self.imports[fname].append(parent_module_name_full)
def scan_cpp_file(self, fname: str) -> None:
fpath = pathlib.Path(fname)
@@ -98,7 +136,7 @@ class DependencyScanner:
export_match = CPP_EXPORT_RE.match(line)
if import_match:
needed = import_match.group(1)
- self.needs[fname].append(needed)
+ self.imports[fname].append(needed)
if export_match:
exported_module = export_match.group(1)
if exported_module in self.provided_by:
@@ -123,47 +161,44 @@ class DependencyScanner:
def scan(self) -> int:
for s, lang in self.sources:
self.scan_file(s, lang)
- with open(self.outfile, 'w', encoding='utf-8') as ofile:
- ofile.write('ninja_dyndep_version = 1\n')
- for src, lang in self.sources:
- objfilename = self.target_data.source2object[src]
- mods_and_submods_needed = []
- module_files_generated = []
- module_files_needed = []
- if src in self.sources_with_exports:
- module_files_generated.append(self.module_name_for(src, lang))
- if src in self.needs:
- for modname in self.needs[src]:
- if modname not in self.provided_by:
- # Nothing provides this module, we assume that it
- # comes from a dependency library somewhere and is
- # already built by the time this compilation starts.
- pass
- else:
- mods_and_submods_needed.append(modname)
-
- for modname in mods_and_submods_needed:
- provider_src = self.provided_by[modname]
- provider_modfile = self.module_name_for(provider_src, lang)
- # Prune self-dependencies
- if provider_src != src:
- module_files_needed.append(provider_modfile)
-
- quoted_objfilename = ninja_quote(objfilename, True)
- quoted_module_files_generated = [ninja_quote(x, True) for x in module_files_generated]
- quoted_module_files_needed = [ninja_quote(x, True) for x in module_files_needed]
- if quoted_module_files_generated:
- mod_gen = '| ' + ' '.join(quoted_module_files_generated)
- else:
- mod_gen = ''
- if quoted_module_files_needed:
- mod_dep = '| ' + ' '.join(quoted_module_files_needed)
- else:
- mod_dep = ''
- build_line = 'build {} {}: dyndep {}'.format(quoted_objfilename,
- mod_gen,
- mod_dep)
- ofile.write(build_line + '\n')
+ description: Description = {
+ 'version': 1,
+ 'revision': 0,
+ 'rules': [],
+ }
+ for src, lang in self.sources:
+ rule: Rule = {
+ 'primary-output': self.target_data.source2object[src],
+ 'requires': [],
+ 'provides': [],
+ }
+ if src in self.sources_with_exports:
+ rule['outputs'] = [self.module_name_for(src, lang)]
+ if src in self.imports:
+ for modname in self.imports[src]:
+ provider_src = self.provided_by.get(modname)
+ if provider_src == src:
+ continue
+ rule['requires'].append({
+ 'logical-name': modname,
+ })
+ if provider_src:
+ rule['requires'][-1].update({
+ 'source-path': provider_src,
+ 'compiled-module-path': self.module_name_for(provider_src, lang),
+ })
+ if src in self.exports:
+ modname = self.exports[src]
+ rule['provides'].append({
+ 'logical-name': modname,
+ 'source-path': src,
+ 'compiled-module-path': self.module_name_for(src, lang),
+ })
+ description['rules'].append(rule)
+
+ with open(self.outfile, 'w', encoding='utf-8') as f:
+ json.dump(description, f)
+
return 0
def run(args: T.List[str]) -> int: