summaryrefslogtreecommitdiff
path: root/mesonbuild/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'mesonbuild/scripts')
-rw-r--r--mesonbuild/scripts/depaccumulate.py129
-rw-r--r--mesonbuild/scripts/depscan.py133
2 files changed, 213 insertions, 49 deletions
diff --git a/mesonbuild/scripts/depaccumulate.py b/mesonbuild/scripts/depaccumulate.py
new file mode 100644
index 000000000..7576390d4
--- /dev/null
+++ b/mesonbuild/scripts/depaccumulate.py
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright © 2021-2024 Intel Corporation
+
+"""Accumulator for p1689r5 module dependencies.
+
+See: https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p1689r5.html
+"""
+
+from __future__ import annotations
+import json
+import re
+import textwrap
+import typing as T
+
+if T.TYPE_CHECKING:
+ from .depscan import Description, Rule
+
# Quoting logic copied from the ninjabackend so we do not have to import
# half of Meson just to escape output paths (a measurable startup cost).
_QUOTE_PAT = re.compile(r'[$ :\n]')


def quote(text: str) -> str:
    """Escape *text* so Ninja reads it as one token.

    Each ``$``, space, and ``:`` is prefixed with ``$``.  Newlines cannot be
    expressed in a Ninja file at all, so they raise instead.

    :param text: the string to escape
    :raises RuntimeError: if the string contains a newline
    :return: the escaped string
    """
    # Common case: nothing needs escaping, return the input untouched.
    if _QUOTE_PAT.search(text) is None:
        return text
    if '\n' in text:
        errmsg = textwrap.dedent(f'''\
            Ninja does not support newlines in rules. The content was:

            {text}

            Please report this error with a test case to the Meson bug tracker.''')
        raise RuntimeError(errmsg)
    return _QUOTE_PAT.sub(r'$\g<0>', text)
+
+
# Module-level memo: logical module name -> object file of its provider.
# Safe as a global because this script runs once per dyndep file.
_PROVIDER_CACHE: T.Dict[str, str] = {}


def get_provider(rules: T.List[Rule], name: str) -> T.Optional[str]:
    """Get the object that a module from another Target provides

    We must rely on the object file here instead of the module itself, because
    the object rule is part of the generated build.ninja, while the module is
    only declared inside a dyndep. This creates the need for the dyndep
    generator to depend on previous dyndeps as order deps. Since the module
    interface file will be generated when the object is generated we can rely
    on that as a proxy and simplify generation.

    :param rules: The list of rules to check
    :param name: The logical-name to look for
    :return: The object file of the rule providing the module, or None if no
        rule provides it (e.g. a compiler-provided module)
    """
    # Cache the result for performance reasons
    if name in _PROVIDER_CACHE:
        return _PROVIDER_CACHE[name]

    for r in rules:
        for p in r.get('provides', []):
            if p['logical-name'] == name:
                obj = r['primary-output']
                _PROVIDER_CACHE[name] = obj
                return obj
    return None
+
+
def process_rules(rules: T.List[Rule],
                  extra_rules: T.List[Rule],
                  ) -> T.Iterable[T.Tuple[str, T.Optional[T.List[str]], T.List[str]]]:
    """Process the rules for this Target

    :param rules: the rules for this target
    :param extra_rules: the rules for all of the targets this one links with,
        to use their provides
    :yield: A tuple of the output, the exported modules, and the consumed modules
    """
    for rule in rules:
        exported: T.Optional[T.List[str]] = None
        if 'provides' in rule:
            exported = [entry['compiled-module-path'] for entry in rule['provides']]

        consumed: T.List[str] = []
        for entry in rule.get('requires', []):
            modfile = entry.get('compiled-module-path')
            if modfile is not None:
                consumed.append(modfile)
            else:
                # A miss here is not an error: compiler-provided modules have
                # no rule anywhere in our inputs.
                provider = get_provider(extra_rules, entry['logical-name'])
                if provider:
                    consumed.append(provider)

        yield rule['primary-output'], exported, consumed
+
+
def formatter(files: T.Optional[T.List[str]]) -> str:
    """Render an optional list of files as a Ninja dependency clause.

    :param files: the files to quote and join, possibly None or empty
    :return: '| file1 file2 ...' or the empty string when there is nothing
    """
    if not files:
        return ''
    joined = ' '.join(quote(f) for f in files)
    return f'| {joined}'
+
+
def gen(outfile: str, desc: Description, extra_rules: T.List[Rule]) -> int:
    """Write the Ninja dyndep file for the rules in *desc*.

    :param outfile: path of the dyndep file to write
    :param desc: the p1689 description for this target
    :param extra_rules: rules of all linked targets, used to resolve providers
    :return: 0 on success
    """
    with open(outfile, 'w', encoding='utf-8') as out:
        out.write('ninja_dyndep_version = 1\n\n')
        for obj, provides, requires in process_rules(desc['rules'], extra_rules):
            deps = formatter(requires)
            mods = formatter(provides)
            out.write(f'build {quote(obj)} {mods}: dyndep {deps}\n\n')
    return 0
+
+
def run(args: T.List[str]) -> int:
    """Entry point for the p1689 dependency accumulator.

    :param args: ``[outfile, jsonfile, *jsondeps]`` — the dyndep file to
        write, this target's p1689 JSON file, and the p1689 JSON files of all
        targets this one links with
    :raises ValueError: if fewer than two arguments are given
    :return: 0 on success
    """
    # A real exception, not an assert: asserts are stripped under `python -O`,
    # which would turn a usage error into an opaque unpacking failure.
    if len(args) < 2:
        raise ValueError('got wrong number of arguments!')
    outfile, jsonfile, *jsondeps = args
    with open(jsonfile, 'r', encoding='utf-8') as f:
        desc: Description = json.load(f)

    # All rules, necessary for fulfilling across TU and target boundaries
    rules = desc['rules'].copy()
    for dep in jsondeps:
        with open(dep, encoding='utf-8') as f:
            d: Description = json.load(f)
        rules.extend(d['rules'])

    return gen(outfile, desc, rules)
diff --git a/mesonbuild/scripts/depscan.py b/mesonbuild/scripts/depscan.py
index 44e805447..6bd5cde9a 100644
--- a/mesonbuild/scripts/depscan.py
+++ b/mesonbuild/scripts/depscan.py
@@ -1,22 +1,60 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 The Meson development team
-# Copyright © 2023 Intel Corporation
+# Copyright © 2023-2024 Intel Corporation
from __future__ import annotations
import collections
+import json
import os
import pathlib
import pickle
import re
import typing as T
-from ..backend.ninjabackend import ninja_quote
-
if T.TYPE_CHECKING:
- from typing_extensions import Literal
+ from typing_extensions import Literal, TypedDict, NotRequired
from ..backend.ninjabackend import TargetDependencyScannerInfo
+ Require = TypedDict(
+ 'Require',
+ {
+ 'logical-name': str,
+ 'compiled-module-path': NotRequired[str],
+ 'source-path': NotRequired[str],
+ 'unique-on-source-path': NotRequired[bool],
+ 'lookup-method': NotRequired[Literal['by-name', 'include-angle', 'include-quote']]
+ },
+ )
+
+ Provide = TypedDict(
+ 'Provide',
+ {
+ 'logical-name': str,
+ 'compiled-module-path': NotRequired[str],
+ 'source-path': NotRequired[str],
+ 'unique-on-source-path': NotRequired[bool],
+ 'is-interface': NotRequired[bool],
+ },
+ )
+
+ Rule = TypedDict(
+ 'Rule',
+ {
+ 'primary-output': NotRequired[str],
+ 'outputs': NotRequired[T.List[str]],
+ 'provides': NotRequired[T.List[Provide]],
+ 'requires': NotRequired[T.List[Require]],
+ }
+ )
+
+ class Description(TypedDict):
+
+ version: int
+ revision: int
+ rules: T.List[Rule]
+
+
CPP_IMPORT_RE = re.compile(r'\w*import ([a-zA-Z0-9]+);')
CPP_EXPORT_RE = re.compile(r'\w*export module ([a-zA-Z0-9]+);')
@@ -37,7 +75,7 @@ class DependencyScanner:
self.sources = self.target_data.sources
self.provided_by: T.Dict[str, str] = {}
self.exports: T.Dict[str, str] = {}
- self.needs: collections.defaultdict[str, T.List[str]] = collections.defaultdict(list)
+ self.imports: collections.defaultdict[str, T.List[str]] = collections.defaultdict(list)
self.sources_with_exports: T.List[str] = []
def scan_file(self, fname: str, lang: Literal['cpp', 'fortran']) -> None:
@@ -58,7 +96,7 @@ class DependencyScanner:
# In Fortran you have an using declaration also for the module
# you define in the same file. Prevent circular dependencies.
if needed not in modules_in_this_file:
- self.needs[fname].append(needed)
+ self.imports[fname].append(needed)
if export_match:
exported_module = export_match.group(1).lower()
assert exported_module not in modules_in_this_file
@@ -89,7 +127,7 @@ class DependencyScanner:
# submodule (a1:a2) a3 <- requires a1@a2.smod
#
# a3 does not depend on the a1 parent module directly, only transitively.
- self.needs[fname].append(parent_module_name_full)
+ self.imports[fname].append(parent_module_name_full)
def scan_cpp_file(self, fname: str) -> None:
fpath = pathlib.Path(fname)
@@ -98,7 +136,7 @@ class DependencyScanner:
export_match = CPP_EXPORT_RE.match(line)
if import_match:
needed = import_match.group(1)
- self.needs[fname].append(needed)
+ self.imports[fname].append(needed)
if export_match:
exported_module = export_match.group(1)
if exported_module in self.provided_by:
@@ -123,47 +161,44 @@ class DependencyScanner:
def scan(self) -> int:
for s, lang in self.sources:
self.scan_file(s, lang)
- with open(self.outfile, 'w', encoding='utf-8') as ofile:
- ofile.write('ninja_dyndep_version = 1\n')
- for src, lang in self.sources:
- objfilename = self.target_data.source2object[src]
- mods_and_submods_needed = []
- module_files_generated = []
- module_files_needed = []
- if src in self.sources_with_exports:
- module_files_generated.append(self.module_name_for(src, lang))
- if src in self.needs:
- for modname in self.needs[src]:
- if modname not in self.provided_by:
- # Nothing provides this module, we assume that it
- # comes from a dependency library somewhere and is
- # already built by the time this compilation starts.
- pass
- else:
- mods_and_submods_needed.append(modname)
-
- for modname in mods_and_submods_needed:
- provider_src = self.provided_by[modname]
- provider_modfile = self.module_name_for(provider_src, lang)
- # Prune self-dependencies
- if provider_src != src:
- module_files_needed.append(provider_modfile)
-
- quoted_objfilename = ninja_quote(objfilename, True)
- quoted_module_files_generated = [ninja_quote(x, True) for x in module_files_generated]
- quoted_module_files_needed = [ninja_quote(x, True) for x in module_files_needed]
- if quoted_module_files_generated:
- mod_gen = '| ' + ' '.join(quoted_module_files_generated)
- else:
- mod_gen = ''
- if quoted_module_files_needed:
- mod_dep = '| ' + ' '.join(quoted_module_files_needed)
- else:
- mod_dep = ''
- build_line = 'build {} {}: dyndep {}'.format(quoted_objfilename,
- mod_gen,
- mod_dep)
- ofile.write(build_line + '\n')
+ description: Description = {
+ 'version': 1,
+ 'revision': 0,
+ 'rules': [],
+ }
+ for src, lang in self.sources:
+ rule: Rule = {
+ 'primary-output': self.target_data.source2object[src],
+ 'requires': [],
+ 'provides': [],
+ }
+ if src in self.sources_with_exports:
+ rule['outputs'] = [self.module_name_for(src, lang)]
+ if src in self.imports:
+ for modname in self.imports[src]:
+ provider_src = self.provided_by.get(modname)
+ if provider_src == src:
+ continue
+ rule['requires'].append({
+ 'logical-name': modname,
+ })
+ if provider_src:
+ rule['requires'][-1].update({
+ 'source-path': provider_src,
+ 'compiled-module-path': self.module_name_for(provider_src, lang),
+ })
+ if src in self.exports:
+ modname = self.exports[src]
+ rule['provides'].append({
+ 'logical-name': modname,
+ 'source-path': src,
+ 'compiled-module-path': self.module_name_for(src, lang),
+ })
+ description['rules'].append(rule)
+
+ with open(self.outfile, 'w', encoding='utf-8') as f:
+ json.dump(description, f)
+
return 0
def run(args: T.List[str]) -> int: