diff --git a/.gitignore b/.gitignore index ec7acfdd91..2740a1293e 100644 --- a/.gitignore +++ b/.gitignore @@ -156,7 +156,6 @@ /tools/gyp /tools/isolate_driver.py /tools/luci-go -/tools/mb /tools/memory /tools/protoc_wrapper /tools/python diff --git a/setup_links.py b/setup_links.py index 30b7a9ab27..637f95babc 100755 --- a/setup_links.py +++ b/setup_links.py @@ -72,7 +72,6 @@ DIRECTORIES = [ 'tools/grit', 'tools/gyp', 'tools/luci-go', - 'tools/mb', 'tools/memory', 'tools/protoc_wrapper', 'tools/python', diff --git a/tools/mb/OWNERS b/tools/mb/OWNERS new file mode 100644 index 0000000000..6b8033c204 --- /dev/null +++ b/tools/mb/OWNERS @@ -0,0 +1,2 @@ +kjellander@webrtc.org +ehmaldonado@webrtc.org diff --git a/tools/mb/PRESUBMIT.py b/tools/mb/PRESUBMIT.py new file mode 100644 index 0000000000..a520ff9b29 --- /dev/null +++ b/tools/mb/PRESUBMIT.py @@ -0,0 +1,48 @@ +# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + + +def _CommonChecks(input_api, output_api): + results = [] + + # Run Pylint over the files in the directory. + pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api) + results.extend(input_api.RunTests(pylint_checks)) + + # Run the MB unittests. + results.extend(input_api.canned_checks.RunUnitTestsInDirectory( + input_api, output_api, '.', [ r'^.+_unittest\.py$'])) + + # Validate the format of the mb_config.pyl file. 
+ mb_config_path = input_api.os_path.join(input_api.PresubmitLocalPath(), + '..', '..', 'webrtc', 'build', + 'mb_config.pyl') + cmd = [input_api.python_executable, 'mb.py', 'validate', '--config-file', + mb_config_path] + kwargs = {'cwd': input_api.PresubmitLocalPath()} + results.extend(input_api.RunTests([ + input_api.Command(name='mb_validate', + cmd=cmd, kwargs=kwargs, + message=output_api.PresubmitError)])) + + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=80, + source_file_filter=lambda x: 'mb_config.pyl' in x.LocalPath())) + + return results + + +def CheckChangeOnUpload(input_api, output_api): + return _CommonChecks(input_api, output_api) + + +def CheckChangeOnCommit(input_api, output_api): + return _CommonChecks(input_api, output_api) diff --git a/tools/mb/README.md b/tools/mb/README.md new file mode 100644 index 0000000000..4e73a8e9fc --- /dev/null +++ b/tools/mb/README.md @@ -0,0 +1,22 @@ +# MB - The Meta-Build wrapper + +MB is a simple wrapper intended to provide a uniform interface to either +GYP or GN, such that users and bots can call one script and not need to +worry about whether a given bot is meant to use GN or GYP. + +It supports two main functions: + +1. "gen" - the main `gyp_chromium` / `gn gen` invocation that generates the + Ninja files needed for the build. + +2. "analyze" - the step that takes a list of modified files and a list of + desired targets and reports which targets will need to be rebuilt. + +We also use MB as a forcing function to collect all of the different +build configurations that we actually support for Chromium builds into +one place, in `//tools/mb/mb_config.pyl`. 
+ +For more information, see: + +* [The User Guide](docs/user_guide.md) +* [The Design Spec](docs/design_spec.md) diff --git a/tools/mb/docs/README.md b/tools/mb/docs/README.md new file mode 100644 index 0000000000..f29007d9ed --- /dev/null +++ b/tools/mb/docs/README.md @@ -0,0 +1,4 @@ +# The MB (Meta-Build wrapper) documentation + +* The [User Guide](user_guide.md) +* The [Design Spec](design_spec.md) diff --git a/tools/mb/docs/design_spec.md b/tools/mb/docs/design_spec.md new file mode 100644 index 0000000000..33fda806e8 --- /dev/null +++ b/tools/mb/docs/design_spec.md @@ -0,0 +1,426 @@ +# The MB (Meta-Build wrapper) design spec + +[TOC] + +## Intro + +MB is intended to address two major aspects of the GYP -> GN transition +for Chromium: + +1. "bot toggling" - make it so that we can easily flip a given bot + back and forth between GN and GYP. + +2. "bot configuration" - provide a single source of truth for all of + the different configurations (os/arch/`gyp_define` combinations) of + Chromium that are supported. + +MB must handle at least the `gen` and `analyze` steps on the bots, i.e., +we need to wrap both the `gyp_chromium` invocation to generate the +Ninja files, and the `analyze` step that takes a list of modified files +and a list of targets to build and returns which targets are affected by +the files. + +For more information on how to actually use MB, see +[the user guide](user_guide.md). + +## Design + +MB is intended to be as simple as possible, and to defer as much work as +possible to GN or GYP. It should live as a very simple Python wrapper +that offers little in the way of surprises. + +### Command line + +It is structured as a single binary that supports a list of subcommands: + +* `mb gen -c linux_rel_bot //out/Release` +* `mb analyze -m tryserver.chromium.linux -b linux_rel /tmp/input.json /tmp/output.json` + +### Configurations + +`mb` will first look for a bot config file in a set of different locations +(initially just in //ios/build/bots). 
Bot config files are JSON files that +contain keys for 'GYP_DEFINES' (a list of strings that will be joined together +with spaces and passed to GYP, or a dict that will be similarly converted), +'gn_args' (a list of strings that will be joined together), and an +'mb_type' field that says whether to use GN or GYP. Bot config files +require the full list of settings to be given explicitly. + +If no matching bot config file is found, `mb` looks in the +`//tools/mb/mb_config.pyl` config file to determine whether to use GYP or GN +for a particular build directory, and what set of flags (`GYP_DEFINES` or `gn +args`) to use. + +A config can either be specified directly (useful for testing) or by specifying +the master name and builder name (useful on the bots so that they do not need +to specify a config directly and can be hidden from the details). + +See the [user guide](user_guide.md#mb_config.pyl) for details. + +### Handling the analyze step + +The interface to `mb analyze` is described in the +[user\_guide](user_guide.md#mb_analyze). + +The way analyze works can be subtle and complicated (see below). + +Since the interface basically mirrors the way the "analyze" step on the bots +invokes `gyp_chromium` today, when the config is found to be a gyp config, +the arguments are passed straight through. + +It implements the equivalent functionality in GN by calling `gn refs +[list of files] --type=executable --all --as=output` and filtering the +output to match the list of targets. + +## Analyze + +The goal of the `analyze` step is to speed up the cycle time of the try servers +by only building and running the tests affected by the files in a patch, rather +than everything that might be out of date. Doing this ends up being tricky. + +We start with the following requirements and observations: + +* In an ideal (un-resource-constrained) world, we would build and test + everything that a patch affected on every patch. 
This does not + necessarily mean that we would build 'all' on every patch (see below). + +* In the real world, however, we do not have an infinite number of machines, + and try jobs are not infinitely fast, so we need to balance the desire + to get maximum test coverage against the desire to have reasonable cycle + times, given the number of machines we have. + +* Also, since we run most try jobs against tip-of-tree Chromium, by + the time one job completes on the bot, new patches have probably landed, + rendering the build out of date. + +* This means that the next try job may have to do a build that is out of + date due to a combination of files affected by a given patch, and files + affected for unrelated reasons. We want to rebuild and test only the + targets affected by the patch, so that we don't blame or punish the + patch author for unrelated changes. + +So: + +1. We need a way to indicate which changed files we care about and which + we don't (the affected files of a patch). + +2. We need to know which tests we might potentially want to run, and how + those are mapped onto build targets. For some kinds of tests (like + GTest-based tests), the mapping is 1:1 - if you want to run base_unittests, + you need to build base_unittests. For others (like the telemetry and + layout tests), you might need to build several executables in order to + run the tests, and that mapping might best be captured by a *meta* + target (a GN group or a GYP 'none' target like `webkit_tests`) that + depends on the right list of files. Because the GN and GYP files know + nothing about test steps, we have to have some way of mapping back + and forth between test steps and build targets. That mapping + is *not* currently available to MB (or GN or GYP), and so we have to + enough information to make it possible for the caller to do the mapping. + +3. 
We might also want to know when test targets are affected by data files + that aren't compiled (python scripts, or the layout tests themselves). + There's no good way to do this in GYP, but GN supports this. + +4. We also want to ensure that particular targets still compile even if they + are not actually tested; consider testing the installers themselves, or + targets that don't yet have good test coverage. We might want to use meta + targets for this purpose as well. + +5. However, for some meta targets, we don't necessarily want to rebuild the + meta target itself, perhaps just the dependencies of the meta target that + are affected by the patch. For example, if you have a meta target like + `blink_tests` that might depend on ten different test binaries. If a patch + only affects one of them (say `wtf_unittests`), you don't want to + build `blink_tests`, because that might actually also build the other nine + targets. In other words, some meta targets are *prunable*. + +6. As noted above, in the ideal case we actually have enough resources and + things are fast enough that we can afford to build everything affected by a + patch, but listing every possible target explicitly would be painful. The + GYP and GN Ninja generators provide an 'all' target that captures (nearly, + see [crbug.com/503241](crbug.com/503241)) everything, but unfortunately + neither GN nor GYP actually represents 'all' as a meta target in the build + graph, so we will need to write code to handle that specially. + +7. In some cases, we will not be able to correctly analyze the build graph to + determine the impact of a patch, and need to bail out (e.g,. if you change a + build file itself, it may not be easy to tell how that affects the graph). + In that case we should simply build and run everything. 
+The interaction between 2) and 5) means that we need to treat meta targets +two different ways, and so we need to know which targets should be +pruned in the sense of 5) and which targets should be returned unchanged +so that we can map them back to the appropriate tests. + +So, we need three things as input: + +* `files`: the list of files in the patch +* `test_targets`: the list of ninja targets which, if affected by a patch, + should be reported back so that we can map them back to the appropriate + tests to run. Any meta targets in this list should *not* be pruned. +* `additional_compile_targets`: the list of ninja targets we wish to compile + *in addition to* the list in `test_targets`. Any meta targets + present in this list should be pruned (we don't need to return the + meta targets because they aren't mapped back to tests, and we don't want + to build them because we might build too much). + +We can then return two lists as output: + +* `compile_targets`, which is a list of pruned targets to be + passed to Ninja to build. It is acceptable to replace a list of + pruned targets by a meta target if it turns out that all of the + dependencies of the target are affected by the patch (i.e., + all ten binaries that blink_tests depends on), but doing so is + not required. +* `test_targets`, which is a list of unpruned targets to be mapped + back to determine which tests to run. + +There may be substantial overlap between the two lists, but there is +no guarantee that one is a subset of the other and the two cannot be +used interchangeably or merged together without losing information and +causing the wrong thing to happen. + +The implementation is responsible for recognizing 'all' as a magic string +and mapping it onto the list of all root nodes in the build graph. 
+ +There may be files listed in the input that don't actually exist in the build +graph: this could be either the result of an error (the file should be in the +build graph, but isn't), or perfectly fine (the file doesn't affect the build +graph at all). We can't tell these two apart, so we should ignore missing +files. + +There may be targets listed in the input that don't exist in the build +graph; unlike missing files, this can only indicate a configuration error, +and so we should return which targets are missing so the caller can +treat this as an error, if so desired. + +Any of the three inputs may be an empty list: + +* It normally doesn't make sense to call analyze at all if no files + were modified, but in rare cases we can hit a race where we try to + test a patch after it has already been committed, in which case + the list of modified files is empty. We should return 'no dependency' + in that case. + +* Passing an empty list for one or the other of test_targets and + additional_compile_targets is perfectly sensible: in the former case, + it can indicate that you don't want to run any tests, and in the latter, + it can indicate that you don't want to do build anything else in + addition to the test targets. + +* It doesn't make sense to call analyze if you don't want to compile + anything at all, so passing [] for both test_targets and + additional_compile_targets should probably return an error. + +In the output case, an empty list indicates that there was nothing to +build, or that there were no affected test targets as appropriate. + +Note that passing no arguments to Ninja is equivalent to passing +`all` to Ninja (at least given how GN and GYP work); however, we +don't want to take advantage of this in most cases because we don't +actually want to build every out of date target, only the targets +potentially affected by the files. 
One could try to indicate +to analyze that we wanted to use no arguments instead of an empty +list, but using the existing fields for this seems fragile and/or +confusing, and adding a new field for this seems unwarranted at this time. + +There is an "error" field in case something goes wrong (like the +empty file list case, above, or an internal error in MB/GYP/GN). The +analyze code should also return an error code to the shell if appropriate +to indicate that the command failed. + +In the case where build files themselves are modified and analyze may +not be able to determine a correct answer (point 7 above, where we return +"Found dependency (all)"), we should also return the `test_targets` unmodified +and return the union of `test_targets` and `additional_compile_targets` for +`compile_targets`, to avoid confusion. + +### Examples + +Continuing the example given above, suppose we have the following build +graph: + +* `blink_tests` is a meta target that depends on `webkit_unit_tests`, + `wtf_unittests`, and `webkit_tests` and represents all of the targets + needed to fully test Blink. Each of those is a separate test step. +* `webkit_tests` is also a meta target; it depends on `content_shell` + and `image_diff`. +* `base_unittests` is a separate test binary. +* `wtf_unittests` depends on `Assertions.cpp` and `AssertionsTest.cpp`. +* `webkit_unit_tests` depends on `WebNode.cpp` and `WebNodeTest.cpp`. +* `content_shell` depends on `WebNode.cpp` and `Assertions.cpp`. +* `base_unittests` depends on `logging.cc` and `logging_unittest.cc`. + +#### Example 1 + +We wish to run 'wtf_unittests' and 'webkit_tests' on a bot, but not +compile any additional targets. 
+ +If a patch touches WebNode.cpp, then analyze gets as input: + + { + "files": ["WebNode.cpp"], + "test_targets": ["wtf_unittests", "webkit_tests"], + "additional_compile_targets": [] + } + +and should return as output: + + { + "status": "Found dependency", + "compile_targets": ["webkit_unit_tests"], + "test_targets": ["webkit_tests"] + } + +Note how `webkit_tests` was pruned in compile_targets but not in test_targets. + +#### Example 2 + +Using the same patch as Example 1, assume we wish to run only `wtf_unittests`, +but additionally build everything needed to test Blink (`blink_tests`): + +We pass as input: + + { + "files": ["WebNode.cpp"], + "test_targets": ["wtf_unittests"], + "additional_compile_targets": ["blink_tests"] + } + +And should get as output: + + { + "status": "Found dependency", + "compile_targets": ["webkit_unit_tests"], + "test_targets": [] + } + +Here `blink_tests` was pruned in the output compile_targets, and +test_targets was empty, since blink_tests was not listed in the input +test_targets. + +#### Example 3 + +Build everything, but do not run any tests. + +Input: + + { + "files": ["WebNode.cpp"], + "test_targets": [], + "additional_compile_targets": ["all"] + } + +Output: + + { + "status": "Found dependency", + "compile_targets": ["webkit_unit_tests", "content_shell"], + "test_targets": [] + } + +#### Example 4 + +Same as Example 2, but a build file was modified instead of a source file. + +Input: + + { + "files": ["BUILD.gn"], + "test_targets": ["wtf_unittests"], + "additional_compile_targets": ["blink_tests"] + } + +Output: + + { + "status": "Found dependency (all)", + "compile_targets": ["webkit_unit_tests", "wtf_unittests"], + "test_targets": ["wtf_unittests"] + } + +test_targets was returned unchanged, compile_targets was pruned. + +## Random Requirements and Rationale + +This section is collection of semi-organized notes on why MB is the way +it is ... 
+ +### in-tree or out-of-tree + +The first issue is whether or not this should exist as a script in +Chromium at all; an alternative would be to simply change the bot +configurations to know whether to use GYP or GN, and which flags to +pass. + +That would certainly work, but experience over the past two years +suggests a few things: + + * we should push as much logic as we can into the source repositories + so that they can be versioned and changed atomically with changes to + the product code; having to coordinate changes between src/ and + build/ is at best annoying and can lead to weird errors. + * the infra team would really like to move to providing + product-independent services (i.e., not have to do one thing for + Chromium, another for NaCl, a third for V8, etc.). + * we found that during the SVN->GIT migration the ability to flip bot + configurations between the two via changes to a file in chromium + was very useful. + +All of this suggests that the interface between bots and Chromium should +be a simple one, hiding as much of the chromium logic as possible. + +### Why not have MB be smarter about de-duping flags? + +This just adds complexity to the MB implementation, and duplicates logic +that GYP and GN already have to support anyway; in particular, it might +require MB to know how to parse GYP and GN values. The belief is that +if MB does *not* do this, it will lead to fewer surprises. + +It will not be hard to change this if need be. + +### Integration w/ gclient runhooks + +On the bots, we will disable `gyp_chromium` as part of runhooks (using +`GYP_CHROMIUM_NO_ACTION=1`), so that mb shows up as a separate step. + +At the moment, we expect most developers to either continue to use +`gyp_chromium` in runhooks or to disable at as above if they have no +use for GYP at all. We may revisit how this works once we encourage more +people to use GN full-time (i.e., we might take `gyp_chromium` out of +runhooks altogether). 
+ +### Config per flag set or config per (os/arch/flag set)? + +Currently, mb_config.pyl does not specify the host_os, target_os, host_cpu, or +target_cpu values for every config that Chromium runs on, it only specifies +them for when the values need to be explicitly set on the command line. + +Instead, we have one config per unique combination of flags only. + +In other words, rather than having `linux_rel_bot`, `win_rel_bot`, and +`mac_rel_bot`, we just have `rel_bot`. + +This design allows us to determine easily all of the different sets +of flags that we need to support, but *not* which flags are used on which +host/target combinations. + +It may be that we should really track the latter. Doing so is just a +config file change, however. + +### Non-goals + +* MB is not intended to replace direct invocation of GN or GYP for + complicated build scenarios (aka ChromeOS), where multiple flags need + to be set to user-defined paths for specific toolchains (e.g., where + ChromeOS needs to specify specific board types and compilers). + +* MB is not intended at this time to be something developers use frequently, + or to add a lot of features to. We hope to be able to get rid of it once + the GYP->GN migration is done, and so we should not add things for + developers that can't easily be added to GN itself. + +* MB is not intended to replace the + [CR tool](https://code.google.com/p/chromium/wiki/CRUserManual). Not + only is it only intended to replace the gyp\_chromium part of `'gclient + runhooks'`, it is not really meant as a developer-facing tool. diff --git a/tools/mb/docs/user_guide.md b/tools/mb/docs/user_guide.md new file mode 100644 index 0000000000..9817553bf6 --- /dev/null +++ b/tools/mb/docs/user_guide.md @@ -0,0 +1,297 @@ +# The MB (Meta-Build wrapper) user guide + +[TOC] + +## Introduction + +`mb` is a simple python wrapper around the GYP and GN meta-build tools to +be used as part of the GYP->GN migration. 
+ +It is intended to be used by bots to make it easier to manage the configuration +each bot builds (i.e., the configurations can be changed from chromium +commits), and to consolidate the list of all of the various configurations +that Chromium is built in. + +Ideally this tool will no longer be needed after the migration is complete. + +For more discussion of MB, see also [the design spec](design_spec.md). + +## MB subcommands + +### `mb analyze` + +`mb analyze` is responsible for determining what targets are affected by +a list of files (e.g., the list of files in a patch on a trybot): + +``` +mb analyze -c chromium_linux_rel //out/Release input.json output.json +``` + +Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags +must be specified so that `mb` can figure out which config to use. + +The first positional argument must be a GN-style "source-absolute" path +to the build directory. + +The second positional argument is a (normal) path to a JSON file containing +a single object with the following fields: + + * `files`: an array of the modified filenames to check (as paths relative to + the checkout root). + * `test_targets`: an array of (ninja) build targets that are needed to run the + tests we wish to run. An empty array will be treated as if there are + no tests that will be run. + * `additional_compile_targets`: an array of (ninja) build targets that + reflect the stuff we might want to build *in addition to* the list + passed in `test_targets`. Targets in this list will be treated + specially, in the following way: if a given target is a "meta" + (GN: group, GYP: none) target like 'blink_tests' or + 'chromium_builder_tests', or even the ninja-specific 'all' target, + then only the *dependencies* of the target that are affected by + the modified files will be rebuilt (not the target itself, which + might also cause unaffected dependencies to be rebuilt). An empty + list will be treated as if there are no additional targets to build. 
+ Empty lists for both `test_targets` and `additional_compile_targets` + would cause no work to be done, so will result in an error. + * `targets`: a legacy field that resembled a union of `compile_targets` + and `test_targets`. Support for this field will be removed once the + bots have been updated to use compile_targets and test_targets instead. + +The third positional argument is a (normal) path to where mb will write +the result, also as a JSON object. This object may contain the following +fields: + + * `error`: this should only be present if something failed. + * `compile_targets`: the list of ninja targets that should be passed + directly to the corresponding ninja / compile.py invocation. This + list may contain entries that are *not* listed in the input (see + the description of `additional_compile_targets` above and + [design_spec.md](the design spec) for how this works). + * `invalid_targets`: a list of any targets that were passed in + either of the input lists that weren't actually found in the graph. + * `test_targets`: the subset of the input `test_targets` that are + potentially out of date, indicating that the matching test steps + should be re-run. + * `targets`: a legacy field that indicates the subset of the input `targets` + that depend on the input `files`. + * `build_targets`: a legacy field that indicates the minimal subset of + targets needed to build all of `targets` that were affected. + * `status`: a field containing one of three strings: + + * `"Found dependency"` (build the `compile_targets`) + * `"No dependency"` (i.e., no build needed) + * `"Found dependency (all)"` (`test_targets` is returned as-is; + `compile_targets` should contain the union of `test_targets` and + `additional_compile_targets`. In this case the targets do not + need to be pruned). + +See [design_spec.md](the design spec) for more details and examples; the +differences can be subtle. 
We won't even go into how the `targets` and +`build_targets` differ from each other or from `compile_targets` and +`test_targets`. + +The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`, +`-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`. + +### `mb audit` + +`mb audit` is used to track the progress of the GYP->GN migration. You can +use it to check a single master, or all the masters we care about. See +`mb help audit` for more details (most people are not expected to care about +this). + +### `mb gen` + +`mb gen` is responsible for generating the Ninja files by invoking either GYP +or GN as appropriate. It takes arguments to specify a build config and +a directory, then runs GYP or GN as appropriate: + +``` +% mb gen -m tryserver.chromium.linux -b linux_rel //out/Release +% mb gen -c linux_rel_trybot //out/Release +``` + +Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags +must be specified so that `mb` can figure out which config to use. The +`--phase` flag must also be used with builders that have multiple +build/compile steps (and only with those builders). + +By default, MB will look for a bot config file under `//ios/build/bots` (see +[design_spec.md](the design spec) for details of how the bot config files +work). If no matching one is found, will then look in +`//tools/mb/mb_config.pyl` to look up the config information, but you can +specify a custom config file using the `-f/--config-file` flag. + +The path must be a GN-style "source-absolute" path (as above). + +You can pass the `-n/--dryrun` flag to mb gen to see what will happen without +actually writing anything. + +You can pass the `-q/--quiet` flag to get mb to be silent unless there is an +error, and pass the `-v/--verbose` flag to get mb to log all of the files +that are read and written, and all the commands that are run. 
+ +If the build config will use the Goma distributed-build system, you can pass +the path to your Goma client in the `-g/--goma-dir` flag, and it will be +incorporated into the appropriate flags for GYP or GN as needed. + +If gen ends up using GYP, the path must have a valid GYP configuration as the +last component of the path (i.e., specify `//out/Release_x64`, not `//out`). +The gyp script defaults to `//build/gyp_chromium`, but can be overridden with +the `--gyp-script` flag, e.g. `--gyp-script=gypfiles/gyp_v8`. + +### `mb help` + +Produces help output on the other subcommands + +### `mb lookup` + +Prints what command will be run by `mb gen` (like `mb gen -n` but does +not require you to specify a path). + +The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`, +`--phase`, `-q/--quiet`, and `-v/--verbose` flags work as documented for +`mb gen`. + +### `mb validate` + +Does internal checking to make sure the config file is syntactically +valid and that all of the entries are used properly. It does not validate +that the flags make sense, or that the builder names are legal or +comprehensive, but it does complain about configs and mixins that aren't +used. + +The `-f/--config-file` and `-q/--quiet` flags work as documented for +`mb gen`. + +This is mostly useful as a presubmit check and for verifying changes to +the config file. + +## Isolates and Swarming + +`mb gen` is also responsible for generating the `.isolate` and +`.isolated.gen.json` files needed to run test executables through swarming +in a GN build (in a GYP build, this is done as part of the compile step). + +If you wish to generate the isolate files, pass `mb gen` the +`--swarming-targets-file` command line argument; that arg should be a path +to a file containing a list of ninja build targets to compute the runtime +dependencies for (on Windows, use the ninja target name, not the file, so +`base_unittests`, not `base_unittests.exe`). 
+ +MB will take this file, translate each build target to the matching GN +label (e.g., `base_unittests` -> `//base:base_unittests`, write that list +to a file called `runtime_deps` in the build directory, and pass that to +`gn gen $BUILD ... --runtime-deps-list-file=$BUILD/runtime_deps`. + +Once GN has computed the lists of runtime dependencies, MB will then +look up the command line for each target (currently this is hard-coded +in [mb.py](https://code.google.com/p/chromium/codesearch?q=mb.py#chromium/src/tools/mb/mb.py&q=mb.py%20GetIsolateCommand&sq=package:chromium&type=cs)), and write out the +matching `.isolate` and `.isolated.gen.json` files. + +## The `mb_config.pyl` config file + +The `mb_config.pyl` config file is intended to enumerate all of the +supported build configurations for Chromium. Generally speaking, you +should never need to (or want to) build a configuration that isn't +listed here, and so by using the configs in this file you can avoid +having to juggle long lists of GYP_DEFINES and gn args by hand. + +`mb_config.pyl` is structured as a file containing a single PYthon Literal +expression: a dictionary with three main keys, `masters`, `configs` and +`mixins`. + +The `masters` key contains a nested series of dicts containing mappings +of master -> builder -> config . This allows us to isolate the buildbot +recipes from the actual details of the configs. The config should either +be a single string value representing a key in the `configs` dictionary, +or a list of strings, each of which is a key in the `configs` dictionary; +the latter case is for builders that do multiple compiles with different +arguments in a single build, and must *only* be used for such builders +(where a --phase argument must be supplied in each lookup or gen call). + +The `configs` key points to a dictionary of named build configurations. 
+ +There should be a key in this dict for every supported configuration +of Chromium, meaning every configuration we have a bot for, and every +configuration commonly used by developers but that we may not have a bot +for. + +The value of each key is a list of "mixins" that will define what that +build_config does. Each item in the list must be an entry in the dictionary +value of the `mixins` key. + +Each mixin value is itself a dictionary that contains one or more of the +following keys: + + * `gyp_crosscompile`: a boolean; if true, GYP_CROSSCOMPILE=1 is set in + the environment and passed to GYP. + * `gyp_defines`: a string containing a list of GYP_DEFINES. + * `gn_args`: a string containing a list of values passed to gn --args. + * `mixins`: a list of other mixins that should be included. + * `type`: a string with either the value `gyp` or `gn`; + setting this indicates which meta-build tool to use. + +When `mb gen` or `mb analyze` executes, it takes a config name, looks it +up in the 'configs' dict, and then does a left-to-right expansion of the +mixins; gyp_defines and gn_args values are concatenated, and the type values +override each other. 
+
+For example, if you had:
+
+```
+{
+  'configs': {
+    'linux_release_trybot': ['gyp_release', 'trybot'],
+    'gn_shared_debug': None,
+  },
+  'mixins': {
+    'bot': {
+      'gyp_defines': 'use_goma=1 dcheck_always_on=0',
+      'gn_args': 'use_goma=true dcheck_always_on=false',
+    },
+    'debug': {
+      'gn_args': 'is_debug=true',
+    },
+    'gn': {'type': 'gn'},
+    'gyp_release': {
+      'mixins': ['bot', 'release'],
+      'type': 'gyp',
+    },
+    'release': {
+      'gn_args': 'is_debug=false',
+    },
+    'shared': {
+      'gn_args': 'is_component_build=true',
+      'gyp_defines': 'component=shared_library',
+    },
+    'trybot': {
+      'gyp_defines': 'dcheck_always_on=1',
+      'gn_args': 'dcheck_always_on=true',
+    }
+  }
+}
+```
+
+and you ran `mb gen -c linux_release_trybot //out/Release`, it would
+translate into a call to `gyp_chromium -G Release` with `GYP_DEFINES` set to
+`"use_goma=1 dcheck_always_on=0 dcheck_always_on=1"`.
+
+(From that you can see that mb is intentionally dumb and does not
+attempt to de-dup the flags, it lets gyp do that).
+
+## Debugging MB
+
+By design, MB should be simple enough that very little can go wrong.
+
+The most obvious issue is that you might see different commands being
+run than you expect; running `'mb -v'` will print what it's doing and
+run the commands; `'mb -n'` will print what it will do but *not* run
+the commands.
+
+If you hit weirder things than that, add some print statements to the
+Python script, send a question to gn-dev@chromium.org, or
+[file a bug](https://crbug.com/new) with the label
+'mb' and cc: dpranke@chromium.org.
+
+
diff --git a/tools/mb/mb b/tools/mb/mb
new file mode 100755
index 0000000000..d3a0cdf019
--- /dev/null
+++ b/tools/mb/mb
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. 
+ +base_dir=$(dirname "$0") + +PYTHONDONTWRITEBYTECODE=1 exec python "$base_dir/mb.py" "$@" diff --git a/tools/mb/mb.bat b/tools/mb/mb.bat new file mode 100755 index 0000000000..a82770e714 --- /dev/null +++ b/tools/mb/mb.bat @@ -0,0 +1,6 @@ +@echo off +setlocal +:: This is required with cygwin only. +PATH=%~dp0;%PATH% +set PYTHONDONTWRITEBYTECODE=1 +call python "%~dp0mb.py" %* diff --git a/tools/mb/mb.py b/tools/mb/mb.py new file mode 100755 index 0000000000..038217e072 --- /dev/null +++ b/tools/mb/mb.py @@ -0,0 +1,1580 @@ +#!/usr/bin/env python +# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +"""MB - the Meta-Build wrapper around GYP and GN + +MB is a wrapper script for GYP and GN that can be used to generate build files +for sets of canned configurations and analyze them. 
+""" + +from __future__ import print_function + +import argparse +import ast +import errno +import json +import os +import pipes +import pprint +import re +import shutil +import sys +import subprocess +import tempfile +import traceback +import urllib2 + +from collections import OrderedDict + +CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) +sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path + +import gn_helpers + + +def main(args): + mbw = MetaBuildWrapper() + return mbw.Main(args) + + +class MetaBuildWrapper(object): + def __init__(self): + self.chromium_src_dir = CHROMIUM_SRC_DIR + self.default_config = os.path.join(self.chromium_src_dir, 'tools', 'mb', + 'mb_config.pyl') + self.default_isolate_map = os.path.join(self.chromium_src_dir, 'testing', + 'buildbot', 'gn_isolate_map.pyl') + self.executable = sys.executable + self.platform = sys.platform + self.sep = os.sep + self.args = argparse.Namespace() + self.configs = {} + self.masters = {} + self.mixins = {} + + def Main(self, args): + self.ParseArgs(args) + try: + ret = self.args.func() + if ret: + self.DumpInputFiles() + return ret + except KeyboardInterrupt: + self.Print('interrupted, exiting') + return 130 + except Exception: + self.DumpInputFiles() + s = traceback.format_exc() + for l in s.splitlines(): + self.Print(l) + return 1 + + def ParseArgs(self, argv): + def AddCommonOptions(subp): + subp.add_argument('-b', '--builder', + help='builder name to look up config from') + subp.add_argument('-m', '--master', + help='master name to look up config from') + subp.add_argument('-c', '--config', + help='configuration to analyze') + subp.add_argument('--phase', + help='optional phase name (used when builders ' + 'do multiple compiles with different ' + 'arguments in a single build)') + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file ' + '(default is %(default)s)') + 
subp.add_argument('-i', '--isolate-map-file', metavar='PATH', + default=self.default_isolate_map, + help='path to isolate map file ' + '(default is %(default)s)') + subp.add_argument('-g', '--goma-dir', + help='path to goma directory') + subp.add_argument('--gyp-script', metavar='PATH', + default=self.PathJoin('build', 'gyp_chromium'), + help='path to gyp script relative to project root ' + '(default is %(default)s)') + subp.add_argument('--android-version-code', + help='Sets GN arg android_default_version_code and ' + 'GYP_DEFINE app_manifest_version_code') + subp.add_argument('--android-version-name', + help='Sets GN arg android_default_version_name and ' + 'GYP_DEFINE app_manifest_version_name') + subp.add_argument('-n', '--dryrun', action='store_true', + help='Do a dry run (i.e., do nothing, just print ' + 'the commands that will run)') + subp.add_argument('-v', '--verbose', action='store_true', + help='verbose logging') + + parser = argparse.ArgumentParser(prog='mb') + subps = parser.add_subparsers() + + subp = subps.add_parser('analyze', + help='analyze whether changes to a set of files ' + 'will cause a set of binaries to be rebuilt.') + AddCommonOptions(subp) + subp.add_argument('path', nargs=1, + help='path build was generated into.') + subp.add_argument('input_path', nargs=1, + help='path to a file containing the input arguments ' + 'as a JSON object.') + subp.add_argument('output_path', nargs=1, + help='path to a file containing the output arguments ' + 'as a JSON object.') + subp.set_defaults(func=self.CmdAnalyze) + + subp = subps.add_parser('export', + help='print out the expanded configuration for' + 'each builder as a JSON object') + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file (default is %(default)s)') + subp.add_argument('-g', '--goma-dir', + help='path to goma directory') + subp.set_defaults(func=self.CmdExport) + + subp = subps.add_parser('gen', + help='generate a new set of 
build files') + AddCommonOptions(subp) + subp.add_argument('--swarming-targets-file', + help='save runtime dependencies for targets listed ' + 'in file.') + subp.add_argument('path', nargs=1, + help='path to generate build into') + subp.set_defaults(func=self.CmdGen) + + subp = subps.add_parser('isolate', + help='generate the .isolate files for a given' + 'binary') + AddCommonOptions(subp) + subp.add_argument('path', nargs=1, + help='path build was generated into') + subp.add_argument('target', nargs=1, + help='ninja target to generate the isolate for') + subp.set_defaults(func=self.CmdIsolate) + + subp = subps.add_parser('lookup', + help='look up the command for a given config or ' + 'builder') + AddCommonOptions(subp) + subp.set_defaults(func=self.CmdLookup) + + subp = subps.add_parser( + 'run', + help='build and run the isolated version of a ' + 'binary', + formatter_class=argparse.RawDescriptionHelpFormatter) + subp.description = ( + 'Build, isolate, and run the given binary with the command line\n' + 'listed in the isolate. You may pass extra arguments after the\n' + 'target; use "--" if the extra arguments need to include switches.\n' + '\n' + 'Examples:\n' + '\n' + ' % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n' + ' //out/Default content_browsertests\n' + '\n' + ' % tools/mb/mb.py run out/Default content_browsertests\n' + '\n' + ' % tools/mb/mb.py run out/Default content_browsertests -- \\\n' + ' --test-launcher-retry-limit=0' + '\n' + ) + + AddCommonOptions(subp) + subp.add_argument('-j', '--jobs', dest='jobs', type=int, + help='Number of jobs to pass to ninja') + subp.add_argument('--no-build', dest='build', default=True, + action='store_false', + help='Do not build, just isolate and run') + subp.add_argument('path', nargs=1, + help=('path to generate build into (or use).' 
+ ' This can be either a regular path or a ' + 'GN-style source-relative path like ' + '//out/Default.')) + subp.add_argument('target', nargs=1, + help='ninja target to build and run') + subp.add_argument('extra_args', nargs='*', + help=('extra args to pass to the isolate to run. Use ' + '"--" as the first arg if you need to pass ' + 'switches')) + subp.set_defaults(func=self.CmdRun) + + subp = subps.add_parser('validate', + help='validate the config file') + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file (default is %(default)s)') + subp.set_defaults(func=self.CmdValidate) + + subp = subps.add_parser('audit', + help='Audit the config file to track progress') + subp.add_argument('-f', '--config-file', metavar='PATH', + default=self.default_config, + help='path to config file (default is %(default)s)') + subp.add_argument('-i', '--internal', action='store_true', + help='check internal masters also') + subp.add_argument('-m', '--master', action='append', + help='master to audit (default is all non-internal ' + 'masters in file)') + subp.add_argument('-u', '--url-template', action='store', + default='https://build.chromium.org/p/' + '{master}/json/builders', + help='URL scheme for JSON APIs to buildbot ' + '(default: %(default)s) ') + subp.add_argument('-c', '--check-compile', action='store_true', + help='check whether tbd and master-only bots actually' + ' do compiles') + subp.set_defaults(func=self.CmdAudit) + + subp = subps.add_parser('help', + help='Get help on a subcommand.') + subp.add_argument(nargs='?', action='store', dest='subcommand', + help='The command to get help for.') + subp.set_defaults(func=self.CmdHelp) + + self.args = parser.parse_args(argv) + + def DumpInputFiles(self): + + def DumpContentsOfFilePassedTo(arg_name, path): + if path and self.Exists(path): + self.Print("\n# To recreate the file passed to %s:" % arg_name) + self.Print("%% cat > %s <GN migration on the bots.""" + + # 
First, make sure the config file is okay, but don't print anything + # if it is (it will throw an error if it isn't). + self.CmdValidate(print_ok=False) + + stats = OrderedDict() + STAT_MASTER_ONLY = 'Master only' + STAT_CONFIG_ONLY = 'Config only' + STAT_TBD = 'Still TBD' + STAT_GYP = 'Still GYP' + STAT_DONE = 'Done (on GN)' + stats[STAT_MASTER_ONLY] = 0 + stats[STAT_CONFIG_ONLY] = 0 + stats[STAT_TBD] = 0 + stats[STAT_GYP] = 0 + stats[STAT_DONE] = 0 + + def PrintBuilders(heading, builders, notes): + stats.setdefault(heading, 0) + stats[heading] += len(builders) + if builders: + self.Print(' %s:' % heading) + for builder in sorted(builders): + self.Print(' %s%s' % (builder, notes[builder])) + + self.ReadConfigFile() + + masters = self.args.master or self.masters + for master in sorted(masters): + url = self.args.url_template.replace('{master}', master) + + self.Print('Auditing %s' % master) + + MASTERS_TO_SKIP = ( + 'client.skia', + 'client.v8.fyi', + 'tryserver.v8', + ) + if master in MASTERS_TO_SKIP: + # Skip these bots because converting them is the responsibility of + # those teams and out of scope for the Chromium migration to GN. + self.Print(' Skipped (out of scope)') + self.Print('') + continue + + INTERNAL_MASTERS = ('official.desktop', 'official.desktop.continuous', + 'internal.client.kitchensync') + if master in INTERNAL_MASTERS and not self.args.internal: + # Skip these because the servers aren't accessible by default ... + self.Print(' Skipped (internal)') + self.Print('') + continue + + try: + # Fetch the /builders contents from the buildbot master. The + # keys of the dict are the builder names themselves. 
+ json_contents = self.Fetch(url) + d = json.loads(json_contents) + except Exception as e: + self.Print(str(e)) + return 1 + + config_builders = set(self.masters[master]) + master_builders = set(d.keys()) + both = master_builders & config_builders + master_only = master_builders - config_builders + config_only = config_builders - master_builders + tbd = set() + gyp = set() + done = set() + notes = {builder: '' for builder in config_builders | master_builders} + + for builder in both: + config = self.masters[master][builder] + if config == 'tbd': + tbd.add(builder) + elif isinstance(config, dict): + vals = self.FlattenConfig(config.values()[0]) + if vals['type'] == 'gyp': + gyp.add(builder) + else: + done.add(builder) + elif config.startswith('//'): + done.add(builder) + else: + vals = self.FlattenConfig(config) + if vals['type'] == 'gyp': + gyp.add(builder) + else: + done.add(builder) + + if self.args.check_compile and (tbd or master_only): + either = tbd | master_only + for builder in either: + notes[builder] = ' (' + self.CheckCompile(master, builder) +')' + + if master_only or config_only or tbd or gyp: + PrintBuilders(STAT_MASTER_ONLY, master_only, notes) + PrintBuilders(STAT_CONFIG_ONLY, config_only, notes) + PrintBuilders(STAT_TBD, tbd, notes) + PrintBuilders(STAT_GYP, gyp, notes) + else: + self.Print(' All GN!') + + stats[STAT_DONE] += len(done) + + self.Print('') + + fmt = '{:<27} {:>4}' + self.Print(fmt.format('Totals', str(sum(int(v) for v in stats.values())))) + self.Print(fmt.format('-' * 27, '----')) + for stat, count in stats.items(): + self.Print(fmt.format(stat, str(count))) + + return 0 + + def GetConfig(self): + build_dir = self.args.path[0] + + vals = self.DefaultVals() + if self.args.builder or self.args.master or self.args.config: + vals = self.Lookup() + if vals['type'] == 'gn': + # Re-run gn gen in order to ensure the config is consistent with the + # build dir. 
+ self.RunGNGen(vals) + return vals + + mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type') + if not self.Exists(mb_type_path): + toolchain_path = self.PathJoin(self.ToAbsPath(build_dir), + 'toolchain.ninja') + if not self.Exists(toolchain_path): + self.Print('Must either specify a path to an existing GN build dir ' + 'or pass in a -m/-b pair or a -c flag to specify the ' + 'configuration') + return {} + else: + mb_type = 'gn' + else: + mb_type = self.ReadFile(mb_type_path).strip() + + if mb_type == 'gn': + vals['gn_args'] = self.GNArgsFromDir(build_dir) + vals['type'] = mb_type + + return vals + + def GNArgsFromDir(self, build_dir): + args_contents = "" + gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn') + if self.Exists(gn_args_path): + args_contents = self.ReadFile(gn_args_path) + gn_args = [] + for l in args_contents.splitlines(): + fields = l.split(' ') + name = fields[0] + val = ' '.join(fields[2:]) + gn_args.append('%s=%s' % (name, val)) + + return ' '.join(gn_args) + + def Lookup(self): + vals = self.ReadIOSBotConfig() + if not vals: + self.ReadConfigFile() + config = self.ConfigFromArgs() + if config.startswith('//'): + if not self.Exists(self.ToAbsPath(config)): + raise MBErr('args file "%s" not found' % config) + vals = self.DefaultVals() + vals['args_file'] = config + else: + if not config in self.configs: + raise MBErr('Config "%s" not found in %s' % + (config, self.args.config_file)) + vals = self.FlattenConfig(config) + + # Do some basic sanity checking on the config so that we + # don't have to do this in every caller. 
+ if 'type' not in vals: + vals['type'] = 'gn' + assert vals['type'] in ('gn', 'gyp'), ( + 'Unknown meta-build type "%s"' % vals['gn_args']) + + return vals + + def ReadIOSBotConfig(self): + if not self.args.master or not self.args.builder: + return {} + path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots', + self.args.master, self.args.builder + '.json') + if not self.Exists(path): + return {} + + contents = json.loads(self.ReadFile(path)) + gyp_vals = contents.get('GYP_DEFINES', {}) + if isinstance(gyp_vals, dict): + gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items()) + else: + gyp_defines = ' '.join(gyp_vals) + gn_args = ' '.join(contents.get('gn_args', [])) + + vals = self.DefaultVals() + vals['gn_args'] = gn_args + vals['gyp_defines'] = gyp_defines + vals['type'] = contents.get('mb_type', 'gn') + return vals + + def ReadConfigFile(self): + if not self.Exists(self.args.config_file): + raise MBErr('config file not found at %s' % self.args.config_file) + + try: + contents = ast.literal_eval(self.ReadFile(self.args.config_file)) + except SyntaxError as e: + raise MBErr('Failed to parse config file "%s": %s' % + (self.args.config_file, e)) + + self.configs = contents['configs'] + self.masters = contents['masters'] + self.mixins = contents['mixins'] + + def ReadIsolateMap(self): + if not self.Exists(self.args.isolate_map_file): + raise MBErr('isolate map file not found at %s' % + self.args.isolate_map_file) + try: + return ast.literal_eval(self.ReadFile(self.args.isolate_map_file)) + except SyntaxError as e: + raise MBErr('Failed to parse isolate map file "%s": %s' % + (self.args.isolate_map_file, e)) + + def ConfigFromArgs(self): + if self.args.config: + if self.args.master or self.args.builder: + raise MBErr('Can not specific both -c/--config and -m/--master or ' + '-b/--builder') + + return self.args.config + + if not self.args.master or not self.args.builder: + raise MBErr('Must specify either -c/--config or ' + '(-m/--master and 
-b/--builder)') + + if not self.args.master in self.masters: + raise MBErr('Master name "%s" not found in "%s"' % + (self.args.master, self.args.config_file)) + + if not self.args.builder in self.masters[self.args.master]: + raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' % + (self.args.builder, self.args.master, self.args.config_file)) + + config = self.masters[self.args.master][self.args.builder] + if isinstance(config, dict): + if self.args.phase is None: + raise MBErr('Must specify a build --phase for %s on %s' % + (self.args.builder, self.args.master)) + phase = str(self.args.phase) + if phase not in config: + raise MBErr('Phase %s doesn\'t exist for %s on %s' % + (phase, self.args.builder, self.args.master)) + return config[phase] + + if self.args.phase is not None: + raise MBErr('Must not specify a build --phase for %s on %s' % + (self.args.builder, self.args.master)) + return config + + def FlattenConfig(self, config): + mixins = self.configs[config] + vals = self.DefaultVals() + + visited = [] + self.FlattenMixins(mixins, vals, visited) + return vals + + def DefaultVals(self): + return { + 'args_file': '', + 'cros_passthrough': False, + 'gn_args': '', + 'gyp_defines': '', + 'gyp_crosscompile': False, + 'type': 'gn', + } + + def FlattenMixins(self, mixins, vals, visited): + for m in mixins: + if m not in self.mixins: + raise MBErr('Unknown mixin "%s"' % m) + + visited.append(m) + + mixin_vals = self.mixins[m] + + if 'cros_passthrough' in mixin_vals: + vals['cros_passthrough'] = mixin_vals['cros_passthrough'] + if 'gn_args' in mixin_vals: + if vals['gn_args']: + vals['gn_args'] += ' ' + mixin_vals['gn_args'] + else: + vals['gn_args'] = mixin_vals['gn_args'] + if 'gyp_crosscompile' in mixin_vals: + vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile'] + if 'gyp_defines' in mixin_vals: + if vals['gyp_defines']: + vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines'] + else: + vals['gyp_defines'] = mixin_vals['gyp_defines'] + if 'type' 
in mixin_vals: + vals['type'] = mixin_vals['type'] + + if 'mixins' in mixin_vals: + self.FlattenMixins(mixin_vals['mixins'], vals, visited) + return vals + + def ClobberIfNeeded(self, vals): + path = self.args.path[0] + build_dir = self.ToAbsPath(path) + mb_type_path = self.PathJoin(build_dir, 'mb_type') + needs_clobber = False + new_mb_type = vals['type'] + if self.Exists(build_dir): + if self.Exists(mb_type_path): + old_mb_type = self.ReadFile(mb_type_path) + if old_mb_type != new_mb_type: + self.Print("Build type mismatch: was %s, will be %s, clobbering %s" % + (old_mb_type, new_mb_type, path)) + needs_clobber = True + else: + # There is no 'mb_type' file in the build directory, so this probably + # means that the prior build(s) were not done through mb, and we + # have no idea if this was a GYP build or a GN build. Clobber it + # to be safe. + self.Print("%s/mb_type missing, clobbering to be safe" % path) + needs_clobber = True + + if self.args.dryrun: + return + + if needs_clobber: + self.RemoveDirectory(build_dir) + + self.MaybeMakeDirectory(build_dir) + self.WriteFile(mb_type_path, new_mb_type) + + def RunGNGen(self, vals): + build_dir = self.args.path[0] + + cmd = self.GNCmd('gen', build_dir, '--check') + gn_args = self.GNArgs(vals) + + # Since GN hasn't run yet, the build directory may not even exist. + self.MaybeMakeDirectory(self.ToAbsPath(build_dir)) + + gn_args_path = self.ToAbsPath(build_dir, 'args.gn') + self.WriteFile(gn_args_path, gn_args, force_verbose=True) + + swarming_targets = [] + if getattr(self.args, 'swarming_targets_file', None): + # We need GN to generate the list of runtime dependencies for + # the compile targets listed (one per line) in the file so + # we can run them via swarming. We use gn_isolate_map.pyl to convert + # the compile targets to the matching GN labels. 
+ path = self.args.swarming_targets_file + if not self.Exists(path): + self.WriteFailureAndRaise('"%s" does not exist' % path, + output_path=None) + contents = self.ReadFile(path) + swarming_targets = set(contents.splitlines()) + + isolate_map = self.ReadIsolateMap() + err, labels = self.MapTargetsToLabels(isolate_map, swarming_targets) + if err: + raise MBErr(err) + + gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps') + self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n') + cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path) + + ret, _, _ = self.Run(cmd) + if ret: + # If `gn gen` failed, we should exit early rather than trying to + # generate isolates. Run() will have already logged any error output. + self.Print('GN gen failed: %d' % ret) + return ret + + android = 'target_os="android"' in vals['gn_args'] + for target in swarming_targets: + if android: + # Android targets may be either android_apk or executable. The former + # will result in runtime_deps associated with the stamp file, while the + # latter will result in runtime_deps associated with the executable. + label = isolate_map[target]['label'] + runtime_deps_targets = [ + target + '.runtime_deps', + 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')] + elif isolate_map[target]['type'] == 'gpu_browser_test': + if self.platform == 'win32': + runtime_deps_targets = ['browser_tests.exe.runtime_deps'] + else: + runtime_deps_targets = ['browser_tests.runtime_deps'] + elif (isolate_map[target]['type'] == 'script' or + isolate_map[target].get('label_type') == 'group'): + # For script targets, the build target is usually a group, + # for which gn generates the runtime_deps next to the stamp file + # for the label, which lives under the obj/ directory, but it may + # also be an executable. 
+ label = isolate_map[target]['label'] + runtime_deps_targets = [ + 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')] + if self.platform == 'win32': + runtime_deps_targets += [ target + '.exe.runtime_deps' ] + else: + runtime_deps_targets += [ target + '.runtime_deps' ] + elif self.platform == 'win32': + runtime_deps_targets = [target + '.exe.runtime_deps'] + else: + runtime_deps_targets = [target + '.runtime_deps'] + + for r in runtime_deps_targets: + runtime_deps_path = self.ToAbsPath(build_dir, r) + if self.Exists(runtime_deps_path): + break + else: + raise MBErr('did not generate any of %s' % + ', '.join(runtime_deps_targets)) + + command, extra_files = self.GetIsolateCommand(target, vals) + + runtime_deps = self.ReadFile(runtime_deps_path).splitlines() + + self.WriteIsolateFiles(build_dir, command, target, runtime_deps, + extra_files) + + return 0 + + def RunGNIsolate(self, vals): + target = self.args.target[0] + isolate_map = self.ReadIsolateMap() + err, labels = self.MapTargetsToLabels(isolate_map, [target]) + if err: + raise MBErr(err) + label = labels[0] + + build_dir = self.args.path[0] + command, extra_files = self.GetIsolateCommand(target, vals) + + cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps') + ret, out, _ = self.Call(cmd) + if ret: + if out: + self.Print(out) + return ret + + runtime_deps = out.splitlines() + + self.WriteIsolateFiles(build_dir, command, target, runtime_deps, + extra_files) + + ret, _, _ = self.Run([ + self.executable, + self.PathJoin('tools', 'swarming_client', 'isolate.py'), + 'check', + '-i', + self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)), + '-s', + self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))], + buffer_output=False) + + return ret + + def WriteIsolateFiles(self, build_dir, command, target, runtime_deps, + extra_files): + isolate_path = self.ToAbsPath(build_dir, target + '.isolate') + self.WriteFile(isolate_path, + pprint.pformat({ + 'variables': { + 'command': command, + 'files': 
sorted(runtime_deps + extra_files), + } + }) + '\n') + + self.WriteJSON( + { + 'args': [ + '--isolated', + self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)), + '--isolate', + self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)), + ], + 'dir': self.chromium_src_dir, + 'version': 1, + }, + isolate_path + 'd.gen.json', + ) + + def MapTargetsToLabels(self, isolate_map, targets): + labels = [] + err = '' + + def StripTestSuffixes(target): + for suffix in ('_apk_run', '_apk', '_run'): + if target.endswith(suffix): + return target[:-len(suffix)], suffix + return None, None + + for target in targets: + if target == 'all': + labels.append(target) + elif target.startswith('//'): + labels.append(target) + else: + if target in isolate_map: + stripped_target, suffix = target, '' + else: + stripped_target, suffix = StripTestSuffixes(target) + if stripped_target in isolate_map: + if isolate_map[stripped_target]['type'] == 'unknown': + err += ('test target "%s" type is unknown\n' % target) + else: + labels.append(isolate_map[stripped_target]['label'] + suffix) + else: + err += ('target "%s" not found in ' + '//testing/buildbot/gn_isolate_map.pyl\n' % target) + + return err, labels + + def GNCmd(self, subcommand, path, *args): + if self.platform == 'linux2': + subdir, exe = 'linux64', 'gn' + elif self.platform == 'darwin': + subdir, exe = 'mac', 'gn' + else: + subdir, exe = 'win', 'gn.exe' + + gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe) + return [gn_path, subcommand, path] + list(args) + + + def GNArgs(self, vals): + if vals['cros_passthrough']: + if not 'GN_ARGS' in os.environ: + raise MBErr('MB is expecting GN_ARGS to be in the environment') + gn_args = os.environ['GN_ARGS'] + if not re.search('target_os.*=.*"chromeos"', gn_args): + raise MBErr('GN_ARGS is missing target_os = "chromeos": (GN_ARGS=%s)' % + gn_args) + else: + gn_args = vals['gn_args'] + + if self.args.goma_dir: + gn_args += ' goma_dir="%s"' % self.args.goma_dir + + 
android_version_code = self.args.android_version_code + if android_version_code: + gn_args += ' android_default_version_code="%s"' % android_version_code + + android_version_name = self.args.android_version_name + if android_version_name: + gn_args += ' android_default_version_name="%s"' % android_version_name + + # Canonicalize the arg string into a sorted, newline-separated list + # of key-value pairs, and de-dup the keys if need be so that only + # the last instance of each arg is listed. + gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args)) + + args_file = vals.get('args_file', None) + if args_file: + gn_args = ('import("%s")\n' % vals['args_file']) + gn_args + return gn_args + + def RunGYPGen(self, vals): + path = self.args.path[0] + + output_dir = self.ParseGYPConfigPath(path) + cmd, env = self.GYPCmd(output_dir, vals) + ret, _, _ = self.Run(cmd, env=env) + return ret + + def RunGYPAnalyze(self, vals): + output_dir = self.ParseGYPConfigPath(self.args.path[0]) + if self.args.verbose: + inp = self.ReadInputJSON(['files', 'test_targets', + 'additional_compile_targets']) + self.Print() + self.Print('analyze input:') + self.PrintJSON(inp) + self.Print() + + cmd, env = self.GYPCmd(output_dir, vals) + cmd.extend(['-f', 'analyzer', + '-G', 'config_path=%s' % self.args.input_path[0], + '-G', 'analyzer_output_path=%s' % self.args.output_path[0]]) + ret, _, _ = self.Run(cmd, env=env) + if not ret and self.args.verbose: + outp = json.loads(self.ReadFile(self.args.output_path[0])) + self.Print() + self.Print('analyze output:') + self.PrintJSON(outp) + self.Print() + + return ret + + def GetIsolateCommand(self, target, vals): + android = 'target_os="android"' in vals['gn_args'] + + # This needs to mirror the settings in //build/config/ui.gni: + # use_x11 = is_linux && !use_ozone. 
+ use_x11 = (self.platform == 'linux2' and + not android and + not 'use_ozone=true' in vals['gn_args']) + + asan = 'is_asan=true' in vals['gn_args'] + msan = 'is_msan=true' in vals['gn_args'] + tsan = 'is_tsan=true' in vals['gn_args'] + + isolate_map = self.ReadIsolateMap() + test_type = isolate_map[target]['type'] + + executable = isolate_map[target].get('executable', target) + executable_suffix = '.exe' if self.platform == 'win32' else '' + + cmdline = [] + extra_files = [] + + if test_type == 'nontest': + self.WriteFailureAndRaise('We should not be isolating %s.' % target, + output_path=None) + + if android and test_type != "script": + logdog_command = [ + '--logdog-bin-cmd', './../../bin/logdog_butler', + '--project', 'chromium', + '--service-account-json', + '/creds/service_accounts/service-account-luci-logdog-publisher.json', + '--prefix', 'android/swarming/logcats/${SWARMING_TASK_ID}', + '--source', '${ISOLATED_OUTDIR}/logcats', + '--name', 'unified_logcats', + ] + test_cmdline = [ + self.PathJoin('bin', 'run_%s' % target), + '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats', + '--target-devices-file', '${SWARMING_BOT_FILE}', + '-v' + ] + cmdline = (['./../../build/android/test_wrapper/logdog_wrapper.py'] + + logdog_command + test_cmdline) + elif use_x11 and test_type == 'windowed_test_launcher': + extra_files = [ + 'xdisplaycheck', + '../../testing/test_env.py', + '../../testing/xvfb.py', + ] + cmdline = [ + '../../testing/xvfb.py', + '.', + './' + str(executable) + executable_suffix, + '--brave-new-test-launcher', + '--test-launcher-bot-mode', + '--asan=%d' % asan, + '--msan=%d' % msan, + '--tsan=%d' % tsan, + ] + elif test_type in ('windowed_test_launcher', 'console_test_launcher'): + extra_files = [ + '../../testing/test_env.py' + ] + cmdline = [ + '../../testing/test_env.py', + './' + str(executable) + executable_suffix, + '--brave-new-test-launcher', + '--test-launcher-bot-mode', + '--asan=%d' % asan, + '--msan=%d' % msan, + '--tsan=%d' % tsan, + ] 
+ elif test_type == 'gpu_browser_test': + extra_files = [ + '../../testing/test_env.py' + ] + gtest_filter = isolate_map[target]['gtest_filter'] + cmdline = [ + '../../testing/test_env.py', + './browser_tests' + executable_suffix, + '--test-launcher-bot-mode', + '--enable-gpu', + '--test-launcher-jobs=1', + '--gtest_filter=%s' % gtest_filter, + ] + elif test_type == 'script': + extra_files = [ + '../../testing/test_env.py' + ] + cmdline = [ + '../../testing/test_env.py', + '../../' + self.ToSrcRelPath(isolate_map[target]['script']) + ] + elif test_type in ('raw'): + extra_files = [] + cmdline = [ + './' + str(target) + executable_suffix, + ] + + else: + self.WriteFailureAndRaise('No command line for %s found (test type %s).' + % (target, test_type), output_path=None) + + cmdline += isolate_map[target].get('args', []) + + return cmdline, extra_files + + def ToAbsPath(self, build_path, *comps): + return self.PathJoin(self.chromium_src_dir, + self.ToSrcRelPath(build_path), + *comps) + + def ToSrcRelPath(self, path): + """Returns a relative path from the top of the repo.""" + if path.startswith('//'): + return path[2:].replace('/', self.sep) + return self.RelPath(path, self.chromium_src_dir) + + def ParseGYPConfigPath(self, path): + rpath = self.ToSrcRelPath(path) + output_dir, _, _ = rpath.rpartition(self.sep) + return output_dir + + def GYPCmd(self, output_dir, vals): + if vals['cros_passthrough']: + if not 'GYP_DEFINES' in os.environ: + raise MBErr('MB is expecting GYP_DEFINES to be in the environment') + gyp_defines = os.environ['GYP_DEFINES'] + if not 'chromeos=1' in gyp_defines: + raise MBErr('GYP_DEFINES is missing chromeos=1: (GYP_DEFINES=%s)' % + gyp_defines) + else: + gyp_defines = vals['gyp_defines'] + + goma_dir = self.args.goma_dir + + # GYP uses shlex.split() to split the gyp defines into separate arguments, + # so we can support backslashes and and spaces in arguments by quoting + # them, even on Windows, where this normally wouldn't work. 
+ if goma_dir and ('\\' in goma_dir or ' ' in goma_dir): + goma_dir = "'%s'" % goma_dir + + if goma_dir: + gyp_defines += ' gomadir=%s' % goma_dir + + android_version_code = self.args.android_version_code + if android_version_code: + gyp_defines += ' app_manifest_version_code=%s' % android_version_code + + android_version_name = self.args.android_version_name + if android_version_name: + gyp_defines += ' app_manifest_version_name=%s' % android_version_name + + cmd = [ + self.executable, + self.args.gyp_script, + '-G', + 'output_dir=' + output_dir, + ] + + # Ensure that we have an environment that only contains + # the exact values of the GYP variables we need. + env = os.environ.copy() + + # This is a terrible hack to work around the fact that + # //tools/clang/scripts/update.py is invoked by GYP and GN but + # currently relies on an environment variable to figure out + # what revision to embed in the command line #defines. + # For GN, we've made this work via a gn arg that will cause update.py + # to get an additional command line arg, but getting that to work + # via GYP_DEFINES has proven difficult, so we rewrite the GYP_DEFINES + # to get rid of the arg and add the old var in, instead. + # See crbug.com/582737 for more on this. This can hopefully all + # go away with GYP. + m = re.search('llvm_force_head_revision=1\s*', gyp_defines) + if m: + env['LLVM_FORCE_HEAD_REVISION'] = '1' + gyp_defines = gyp_defines.replace(m.group(0), '') + + # This is another terrible hack to work around the fact that + # GYP sets the link concurrency to use via the GYP_LINK_CONCURRENCY + # environment variable, and not via a proper GYP_DEFINE. See + # crbug.com/611491 for more on this. 
+ m = re.search('gyp_link_concurrency=(\d+)(\s*)', gyp_defines) + if m: + env['GYP_LINK_CONCURRENCY'] = m.group(1) + gyp_defines = gyp_defines.replace(m.group(0), '') + + env['GYP_GENERATORS'] = 'ninja' + if 'GYP_CHROMIUM_NO_ACTION' in env: + del env['GYP_CHROMIUM_NO_ACTION'] + if 'GYP_CROSSCOMPILE' in env: + del env['GYP_CROSSCOMPILE'] + env['GYP_DEFINES'] = gyp_defines + if vals['gyp_crosscompile']: + env['GYP_CROSSCOMPILE'] = '1' + return cmd, env + + def RunGNAnalyze(self, vals): + # Analyze runs before 'gn gen' now, so we need to run gn gen + # in order to ensure that we have a build directory. + ret = self.RunGNGen(vals) + if ret: + return ret + + build_path = self.args.path[0] + input_path = self.args.input_path[0] + gn_input_path = input_path + '.gn' + output_path = self.args.output_path[0] + gn_output_path = output_path + '.gn' + + inp = self.ReadInputJSON(['files', 'test_targets', + 'additional_compile_targets']) + if self.args.verbose: + self.Print() + self.Print('analyze input:') + self.PrintJSON(inp) + self.Print() + + + # This shouldn't normally happen, but could due to unusual race conditions, + # like a try job that gets scheduled before a patch lands but runs after + # the patch has landed. 
+ if not inp['files']: + self.Print('Warning: No files modified in patch, bailing out early.') + self.WriteJSON({ + 'status': 'No dependency', + 'compile_targets': [], + 'test_targets': [], + }, output_path) + return 0 + + gn_inp = {} + gn_inp['files'] = ['//' + f for f in inp['files'] if not f.startswith('//')] + + isolate_map = self.ReadIsolateMap() + err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels( + isolate_map, inp['additional_compile_targets']) + if err: + raise MBErr(err) + + err, gn_inp['test_targets'] = self.MapTargetsToLabels( + isolate_map, inp['test_targets']) + if err: + raise MBErr(err) + labels_to_targets = {} + for i, label in enumerate(gn_inp['test_targets']): + labels_to_targets[label] = inp['test_targets'][i] + + try: + self.WriteJSON(gn_inp, gn_input_path) + cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path) + ret, _, _ = self.Run(cmd, force_verbose=True) + if ret: + return ret + + gn_outp_str = self.ReadFile(gn_output_path) + try: + gn_outp = json.loads(gn_outp_str) + except Exception as e: + self.Print("Failed to parse the JSON string GN returned: %s\n%s" + % (repr(gn_outp_str), str(e))) + raise + + outp = {} + if 'status' in gn_outp: + outp['status'] = gn_outp['status'] + if 'error' in gn_outp: + outp['error'] = gn_outp['error'] + if 'invalid_targets' in gn_outp: + outp['invalid_targets'] = gn_outp['invalid_targets'] + if 'compile_targets' in gn_outp: + if 'all' in gn_outp['compile_targets']: + outp['compile_targets'] = ['all'] + else: + outp['compile_targets'] = [ + label.replace('//', '') for label in gn_outp['compile_targets']] + if 'test_targets' in gn_outp: + outp['test_targets'] = [ + labels_to_targets[label] for label in gn_outp['test_targets']] + + if self.args.verbose: + self.Print() + self.Print('analyze output:') + self.PrintJSON(outp) + self.Print() + + self.WriteJSON(outp, output_path) + + finally: + if self.Exists(gn_input_path): + self.RemoveFile(gn_input_path) + if 
self.Exists(gn_output_path): + self.RemoveFile(gn_output_path) + + return 0 + + def ReadInputJSON(self, required_keys): + path = self.args.input_path[0] + output_path = self.args.output_path[0] + if not self.Exists(path): + self.WriteFailureAndRaise('"%s" does not exist' % path, output_path) + + try: + inp = json.loads(self.ReadFile(path)) + except Exception as e: + self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' % + (path, e), output_path) + + for k in required_keys: + if not k in inp: + self.WriteFailureAndRaise('input file is missing a "%s" key' % k, + output_path) + + return inp + + def WriteFailureAndRaise(self, msg, output_path): + if output_path: + self.WriteJSON({'error': msg}, output_path, force_verbose=True) + raise MBErr(msg) + + def WriteJSON(self, obj, path, force_verbose=False): + try: + self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n', + force_verbose=force_verbose) + except Exception as e: + raise MBErr('Error %s writing to the output path "%s"' % + (e, path)) + + def CheckCompile(self, master, builder): + url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1' + url = urllib2.quote(url_template.format(master=master, builder=builder), + safe=':/()?=') + try: + builds = json.loads(self.Fetch(url)) + except Exception as e: + return str(e) + successes = sorted( + [int(x) for x in builds.keys() if "text" in builds[x] and + cmp(builds[x]["text"][:2], ["build", "successful"]) == 0], + reverse=True) + if not successes: + return "no successful builds" + build = builds[str(successes[0])] + step_names = set([step["name"] for step in build["steps"]]) + compile_indicators = set(["compile", "compile (with patch)", "analyze"]) + if compile_indicators & step_names: + return "compiles" + return "does not compile" + + def PrintCmd(self, cmd, env): + if self.platform == 'win32': + env_prefix = 'set ' + env_quoter = QuoteForSet + shell_quoter = QuoteForCmd + else: + env_prefix = '' + env_quoter = pipes.quote 
+ shell_quoter = pipes.quote + + def print_env(var): + if env and var in env: + self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var]))) + + print_env('GYP_CROSSCOMPILE') + print_env('GYP_DEFINES') + print_env('GYP_LINK_CONCURRENCY') + print_env('LLVM_FORCE_HEAD_REVISION') + + if cmd[0] == self.executable: + cmd = ['python'] + cmd[1:] + self.Print(*[shell_quoter(arg) for arg in cmd]) + + def PrintJSON(self, obj): + self.Print(json.dumps(obj, indent=2, sort_keys=True)) + + def Build(self, target): + build_dir = self.ToSrcRelPath(self.args.path[0]) + ninja_cmd = ['ninja', '-C', build_dir] + if self.args.jobs: + ninja_cmd.extend(['-j', '%d' % self.args.jobs]) + ninja_cmd.append(target) + ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False) + return ret + + def Run(self, cmd, env=None, force_verbose=True, buffer_output=True): + # This function largely exists so it can be overridden for testing. + if self.args.dryrun or self.args.verbose or force_verbose: + self.PrintCmd(cmd, env) + if self.args.dryrun: + return 0, '', '' + + ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output) + if self.args.verbose or force_verbose: + if ret: + self.Print(' -> returned %d' % ret) + if out: + self.Print(out, end='') + if err: + self.Print(err, end='', file=sys.stderr) + return ret, out, err + + def Call(self, cmd, env=None, buffer_output=True): + if buffer_output: + p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=env) + out, err = p.communicate() + else: + p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir, + env=env) + p.wait() + out = err = '' + return p.returncode, out, err + + def ExpandUser(self, path): + # This function largely exists so it can be overridden for testing. + return os.path.expanduser(path) + + def Exists(self, path): + # This function largely exists so it can be overridden for testing. 
+    return os.path.exists(path)
+
+  def Fetch(self, url):
+    # This function largely exists so it can be overridden for testing.
+    f = urllib2.urlopen(url)
+    contents = f.read()
+    f.close()
+    return contents
+
+  def MaybeMakeDirectory(self, path):
+    # Creates the directory (and any parents) if it doesn't already exist.
+    # Use the 'as' form for the exception target; the comma form is
+    # deprecated Py2-only syntax and inconsistent with the rest of the file.
+    try:
+      os.makedirs(path)
+    except OSError as e:
+      if e.errno != errno.EEXIST:
+        raise
+
+  def PathJoin(self, *comps):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.join(*comps)
+
+  def Print(self, *args, **kwargs):
+    # This function largely exists so it can be overridden for testing.
+    print(*args, **kwargs)
+    if kwargs.get('stream', sys.stdout) == sys.stdout:
+      sys.stdout.flush()
+
+  def ReadFile(self, path):
+    # This function largely exists so it can be overridden for testing.
+    with open(path) as fp:
+      return fp.read()
+
+  def RelPath(self, path, start='.'):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.relpath(path, start)
+
+  def RemoveFile(self, path):
+    # This function largely exists so it can be overridden for testing.
+    os.remove(path)
+
+  def RemoveDirectory(self, abs_path):
+    if self.platform == 'win32':
+      # In other places in chromium, we often have to retry this command
+      # because we're worried about other processes still holding on to
+      # file handles, but when MB is invoked, it will be early enough in the
+      # build that there should be no other processes to interfere. We
+      # can change this if need be.
+      self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
+    else:
+      shutil.rmtree(abs_path, ignore_errors=True)
+
+  def TempFile(self, mode='w'):
+    # This function largely exists so it can be overridden for testing.
+    return tempfile.NamedTemporaryFile(mode=mode, delete=False)
+
+  def WriteFile(self, path, contents, force_verbose=False):
+    # This function largely exists so it can be overridden for testing.
+ if self.args.dryrun or self.args.verbose or force_verbose: + self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path)) + with open(path, 'w') as fp: + return fp.write(contents) + + +class MBErr(Exception): + pass + + +# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful +# details of this next section, which handles escaping command lines +# so that they can be copied and pasted into a cmd window. +UNSAFE_FOR_SET = set('^<>&|') +UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%')) +ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"')) + + +def QuoteForSet(arg): + if any(a in UNSAFE_FOR_SET for a in arg): + arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg) + return arg + + +def QuoteForCmd(arg): + # First, escape the arg so that CommandLineToArgvW will parse it properly. + # From //tools/gyp/pylib/gyp/msvs_emulation.py:23. + if arg == '' or ' ' in arg or '"' in arg: + quote_re = re.compile(r'(\\*)"') + arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)) + + # Then check to see if the arg contains any metacharacters other than + # double quotes; if it does, quote everything (including the double + # quotes) for safety. + if any(a in UNSAFE_FOR_CMD for a in arg): + arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg) + return arg + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/tools/mb/mb_unittest.py b/tools/mb/mb_unittest.py new file mode 100755 index 0000000000..b70af005e0 --- /dev/null +++ b/tools/mb/mb_unittest.py @@ -0,0 +1,567 @@ +#!/usr/bin/python +# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +"""Tests for mb.py.""" + +import json +import StringIO +import os +import sys +import unittest + +import mb + + +class FakeMBW(mb.MetaBuildWrapper): + def __init__(self, win32=False): + super(FakeMBW, self).__init__() + + # Override vars for test portability. + if win32: + self.chromium_src_dir = 'c:\\fake_src' + self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl' + self.default_isolate_map = ('c:\\fake_src\\testing\\buildbot\\' + 'gn_isolate_map.pyl') + self.platform = 'win32' + self.executable = 'c:\\python\\python.exe' + self.sep = '\\' + else: + self.chromium_src_dir = '/fake_src' + self.default_config = '/fake_src/tools/mb/mb_config.pyl' + self.default_isolate_map = '/fake_src/testing/buildbot/gn_isolate_map.pyl' + self.executable = '/usr/bin/python' + self.platform = 'linux2' + self.sep = '/' + + self.files = {} + self.calls = [] + self.cmds = [] + self.cross_compile = None + self.out = '' + self.err = '' + self.rmdirs = [] + + def ExpandUser(self, path): + return '$HOME/%s' % path + + def Exists(self, path): + return self.files.get(path) is not None + + def MaybeMakeDirectory(self, path): + self.files[path] = True + + def PathJoin(self, *comps): + return self.sep.join(comps) + + def ReadFile(self, path): + return self.files[path] + + def WriteFile(self, path, contents, force_verbose=False): + if self.args.dryrun or self.args.verbose or force_verbose: + self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path)) + self.files[path] = contents + + def Call(self, cmd, env=None, buffer_output=True): + if env: + self.cross_compile = env.get('GYP_CROSSCOMPILE') + self.calls.append(cmd) + if self.cmds: + return self.cmds.pop(0) + return 0, '', '' + + def Print(self, *args, **kwargs): + sep = kwargs.get('sep', ' ') + end = kwargs.get('end', '\n') + f = kwargs.get('file', sys.stdout) + if f == sys.stderr: + self.err += sep.join(args) + end + else: + self.out += sep.join(args) + end + + def TempFile(self, mode='w'): + return FakeFile(self.files) + + 
def RemoveFile(self, path): + del self.files[path] + + def RemoveDirectory(self, path): + self.rmdirs.append(path) + files_to_delete = [f for f in self.files if f.startswith(path)] + for f in files_to_delete: + self.files[f] = None + + +class FakeFile(object): + def __init__(self, files): + self.name = '/tmp/file' + self.buf = '' + self.files = files + + def write(self, contents): + self.buf += contents + + def close(self): + self.files[self.name] = self.buf + + +TEST_CONFIG = """\ +{ + 'masters': { + 'chromium': {}, + 'fake_master': { + 'fake_builder': 'gyp_rel_bot', + 'fake_gn_builder': 'gn_rel_bot', + 'fake_gyp_crosscompile_builder': 'gyp_crosscompile', + 'fake_gn_debug_builder': 'gn_debug_goma', + 'fake_gyp_builder': 'gyp_debug', + 'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn', + 'fake_multi_phase': { 'phase_1': 'gn_phase_1', 'phase_2': 'gn_phase_2'}, + }, + }, + 'configs': { + 'gyp_rel_bot': ['gyp', 'rel', 'goma'], + 'gn_debug_goma': ['gn', 'debug', 'goma'], + 'gyp_debug': ['gyp', 'debug', 'fake_feature1'], + 'gn_rel_bot': ['gn', 'rel', 'goma'], + 'gyp_crosscompile': ['gyp', 'crosscompile'], + 'gn_phase_1': ['gn', 'phase_1'], + 'gn_phase_2': ['gn', 'phase_2'], + }, + 'mixins': { + 'crosscompile': { + 'gyp_crosscompile': True, + }, + 'fake_feature1': { + 'gn_args': 'enable_doom_melon=true', + 'gyp_defines': 'doom_melon=1', + }, + 'gyp': {'type': 'gyp'}, + 'gn': {'type': 'gn'}, + 'goma': { + 'gn_args': 'use_goma=true', + 'gyp_defines': 'goma=1', + }, + 'phase_1': { + 'gn_args': 'phase=1', + 'gyp_args': 'phase=1', + }, + 'phase_2': { + 'gn_args': 'phase=2', + 'gyp_args': 'phase=2', + }, + 'rel': { + 'gn_args': 'is_debug=false', + }, + 'debug': { + 'gn_args': 'is_debug=true', + }, + }, +} +""" + + +TEST_BAD_CONFIG = """\ +{ + 'configs': { + 'gn_rel_bot_1': ['gn', 'rel', 'chrome_with_codecs'], + 'gn_rel_bot_2': ['gn', 'rel', 'bad_nested_config'], + }, + 'masters': { + 'chromium': { + 'a': 'gn_rel_bot_1', + 'b': 'gn_rel_bot_2', + }, + }, + 
'mixins': { + 'gn': {'type': 'gn'}, + 'chrome_with_codecs': { + 'gn_args': 'proprietary_codecs=true', + }, + 'bad_nested_config': { + 'mixins': ['chrome_with_codecs'], + }, + 'rel': { + 'gn_args': 'is_debug=false', + }, + }, +} +""" + + +GYP_HACKS_CONFIG = """\ +{ + 'masters': { + 'chromium': {}, + 'fake_master': { + 'fake_builder': 'fake_config', + }, + }, + 'configs': { + 'fake_config': ['fake_mixin'], + }, + 'mixins': { + 'fake_mixin': { + 'type': 'gyp', + 'gn_args': '', + 'gyp_defines': + ('foo=bar llvm_force_head_revision=1 ' + 'gyp_link_concurrency=1 baz=1'), + }, + }, +} +""" + + +class UnitTest(unittest.TestCase): + def fake_mbw(self, files=None, win32=False): + mbw = FakeMBW(win32=win32) + mbw.files.setdefault(mbw.default_config, TEST_CONFIG) + mbw.files.setdefault( + mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'), + '''{ + "foo_unittests": { + "label": "//foo:foo_unittests", + "type": "console_test_launcher", + "args": [], + }, + }''') + mbw.files.setdefault( + mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'), + 'is_debug = false\n') + if files: + for path, contents in files.items(): + mbw.files[path] = contents + return mbw + + def check(self, args, mbw=None, files=None, out=None, err=None, ret=None): + if not mbw: + mbw = self.fake_mbw(files) + + actual_ret = mbw.Main(args) + + self.assertEqual(actual_ret, ret) + if out is not None: + self.assertEqual(mbw.out, out) + if err is not None: + self.assertEqual(mbw.err, err) + return mbw + + def test_clobber(self): + files = { + '/fake_src/out/Debug': None, + '/fake_src/out/Debug/mb_type': None, + } + mbw = self.fake_mbw(files) + + # The first time we run this, the build dir doesn't exist, so no clobber. + self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, []) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn') + + # The second time we run this, the build dir exists and matches, so no + # clobber. 
+ self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, []) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn') + + # Now we switch build types; this should result in a clobber. + self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug']) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp') + + # Now we delete mb_type; this checks the case where the build dir + # exists but wasn't populated by mb; this should also result in a clobber. + del mbw.files['/fake_src/out/Debug/mb_type'] + self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0) + self.assertEqual(mbw.rmdirs, + ['/fake_src/out/Debug', '/fake_src/out/Debug']) + self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp') + + def test_gn_analyze(self): + files = {'/tmp/in.json': '''{\ + "files": ["foo/foo_unittest.cc"], + "test_targets": ["foo_unittests"], + "additional_compile_targets": ["all"] + }''', + '/tmp/out.json.gn': '''{\ + "status": "Found dependency", + "compile_targets": ["//foo:foo_unittests"], + "test_targets": ["//foo:foo_unittests"] + }'''} + + mbw = self.fake_mbw(files) + mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '') + + self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default', + '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) + out = json.loads(mbw.files['/tmp/out.json']) + self.assertEqual(out, { + 'status': 'Found dependency', + 'compile_targets': ['foo:foo_unittests'], + 'test_targets': ['foo_unittests'] + }) + + def test_gn_gen(self): + mbw = self.fake_mbw() + self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'], + mbw=mbw, ret=0) + self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'], + ('goma_dir = "/goma"\n' + 'is_debug = true\n' + 'use_goma = true\n')) + + # Make sure we log both what is written to args.gn and the command line. 
+ self.assertIn('Writing """', mbw.out) + self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check', + mbw.out) + + mbw = self.fake_mbw(win32=True) + self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'], + mbw=mbw, ret=0) + self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'], + ('goma_dir = "c:\\\\goma"\n' + 'is_debug = true\n' + 'use_goma = true\n')) + self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug ' + '--check\n', mbw.out) + + mbw = self.fake_mbw() + self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot', + '//out/Debug'], + mbw=mbw, ret=0) + self.assertEqual( + mbw.files['/fake_src/out/Debug/args.gn'], + 'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n') + + + def test_gn_gen_fails(self): + mbw = self.fake_mbw() + mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '') + self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1) + + def test_gn_gen_swarming(self): + files = { + '/tmp/swarming_targets': 'base_unittests\n', + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'raw'," + " 'args': []," + "}}\n" + ), + '/fake_src/out/Default/base_unittests.runtime_deps': ( + "base_unittests\n" + ), + } + mbw = self.fake_mbw(files) + self.check(['gen', + '-c', 'gn_debug_goma', + '--swarming-targets-file', '/tmp/swarming_targets', + '//out/Default'], mbw=mbw, ret=0) + self.assertIn('/fake_src/out/Default/base_unittests.isolate', + mbw.files) + self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json', + mbw.files) + + def test_gn_gen_swarming_script(self): + files = { + '/tmp/swarming_targets': 'cc_perftests\n', + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'cc_perftests': {" + " 'label': '//cc:cc_perftests'," + " 'type': 'script'," + " 'script': '/fake_src/out/Default/test_script.py'," + " 'args': []," + "}}\n" + ), + 
'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': ( + "cc_perftests\n" + ), + } + mbw = self.fake_mbw(files=files, win32=True) + self.check(['gen', + '-c', 'gn_debug_goma', + '--swarming-targets-file', '/tmp/swarming_targets', + '--isolate-map-file', + '/fake_src/testing/buildbot/gn_isolate_map.pyl', + '//out/Default'], mbw=mbw, ret=0) + self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate', + mbw.files) + self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json', + mbw.files) + + + def test_gn_isolate(self): + files = { + '/fake_src/out/Default/toolchain.ninja': "", + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'raw'," + " 'args': []," + "}}\n" + ), + '/fake_src/out/Default/base_unittests.runtime_deps': ( + "base_unittests\n" + ), + } + self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default', + 'base_unittests'], files=files, ret=0) + + # test running isolate on an existing build_dir + files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n' + self.check(['isolate', '//out/Default', 'base_unittests'], + files=files, ret=0) + + files['/fake_src/out/Default/mb_type'] = 'gn\n' + self.check(['isolate', '//out/Default', 'base_unittests'], + files=files, ret=0) + + def test_gn_run(self): + files = { + '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + "{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'raw'," + " 'args': []," + "}}\n" + ), + '/fake_src/out/Default/base_unittests.runtime_deps': ( + "base_unittests\n" + ), + } + self.check(['run', '-c', 'gn_debug_goma', '//out/Default', + 'base_unittests'], files=files, ret=0) + + def test_gn_lookup(self): + self.check(['lookup', '-c', 'gn_debug_goma'], ret=0) + + def test_gn_lookup_goma_dir_expansion(self): + self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0, + out=('\n' + 'Writing """\\\n' + 'goma_dir = "/foo"\n' + 'is_debug = false\n' + 'use_goma = 
true\n' + '""" to _path_/args.gn.\n\n' + '/fake_src/buildtools/linux64/gn gen _path_\n')) + + def test_gyp_analyze(self): + mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release', + '/tmp/in.json', '/tmp/out.json'], ret=0) + self.assertIn('analyzer', mbw.calls[0]) + + def test_gyp_crosscompile(self): + mbw = self.fake_mbw() + self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'], + mbw=mbw, ret=0) + self.assertTrue(mbw.cross_compile) + + def test_gyp_gen(self): + self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'], + ret=0, + out=("GYP_DEFINES='goma=1 gomadir=/goma'\n" + "python build/gyp_chromium -G output_dir=out\n")) + + mbw = self.fake_mbw(win32=True) + self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'], + mbw=mbw, ret=0, + out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n" + "python build\\gyp_chromium -G output_dir=out\n")) + + def test_gyp_gen_fails(self): + mbw = self.fake_mbw() + mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '') + self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1) + + def test_gyp_lookup_goma_dir_expansion(self): + self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0, + out=("GYP_DEFINES='goma=1 gomadir=/foo'\n" + "python build/gyp_chromium -G output_dir=_path_\n")) + + def test_help(self): + orig_stdout = sys.stdout + try: + sys.stdout = StringIO.StringIO() + self.assertRaises(SystemExit, self.check, ['-h']) + self.assertRaises(SystemExit, self.check, ['help']) + self.assertRaises(SystemExit, self.check, ['help', 'gen']) + finally: + sys.stdout = orig_stdout + + def test_multiple_phases(self): + # Check that not passing a --phase to a multi-phase builder fails. + mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'], + ret=1) + self.assertIn('Must specify a build --phase', mbw.out) + + # Check that passing a --phase to a single-phase builder fails. 
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
+                      '--phase', 'phase_1'], ret=1)
+    self.assertIn('Must not specify a build --phase', mbw.out)
+
+    # Check that passing a wrong phase key to a multi-phase builder fails.
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+                      '--phase', 'wrong_phase'], ret=1)
+    self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
+
+    # Check that passing a correct phase key to a multi-phase builder passes.
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+                      '--phase', 'phase_1'], ret=0)
+    self.assertIn('phase = 1', mbw.out)
+
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+                      '--phase', 'phase_2'], ret=0)
+    self.assertIn('phase = 2', mbw.out)
+
+  def test_validate(self):
+    mbw = self.fake_mbw()
+    self.check(['validate'], mbw=mbw, ret=0)
+
+  def test_bad_validate(self):
+    mbw = self.fake_mbw()
+    mbw.files[mbw.default_config] = TEST_BAD_CONFIG
+    self.check(['validate'], mbw=mbw, ret=1)
+
+  def test_gyp_env_hacks(self):
+    mbw = self.fake_mbw()
+    mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
+    self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
+               ret=0,
+               out=("GYP_DEFINES='foo=bar baz=1'\n"
+                    "GYP_LINK_CONCURRENCY=1\n"
+                    "LLVM_FORCE_HEAD_REVISION=1\n"
+                    "python build/gyp_chromium -G output_dir=_path_\n"))
+
+
+if __name__ == '__main__':
+  unittest.main()