// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package blueprint

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"text/scanner"
	"text/template"

	"github.com/google/blueprint/parser"
	"github.com/google/blueprint/pathtools"
	"github.com/google/blueprint/proptools"
)

var ErrBuildActionsNotReady = errors.New("build actions are not ready")

const maxErrors = 10

const MockModuleListFile = "bplist"

// A Context contains all the state needed to parse a set of Blueprints files
// and generate a Ninja file.  The process of generating a Ninja file proceeds
// through a series of four phases.  Each phase corresponds with a set of
// methods on the Context object
//
//         Phase                            Methods
//      ------------      -------------------------------------------
//   1. Registration         RegisterModuleType, RegisterSingletonType
//
//   2. Parse                 ParseBlueprintsFiles, Parse
//
//   3. Generate              ResolveDependencies, PrepareBuildActions
//
//   4. Write                 WriteBuildFile
//
// The registration phase prepares the context to process Blueprints files
// containing various types of modules.  The parse phase reads in one or more
// Blueprints files and validates their contents against the module types that
// have been registered.  The generate phase then analyzes the parsed Blueprints
// contents to create an internal representation for the build actions that must
// be performed.  This phase also performs validation of the module dependencies
// and property values defined in the parsed Blueprints files.  Finally, the
// write phase generates the Ninja manifest text based on the generated build
// actions.
type Context struct {
	// set at instantiation
	moduleFactories     map[string]ModuleFactory
	nameInterface       NameInterface
	moduleGroups        []*moduleGroup
	moduleInfo          map[Module]*moduleInfo
	modulesSorted       []*moduleInfo
	preSingletonInfo    []*singletonInfo
	singletonInfo       []*singletonInfo
	mutatorInfo         []*mutatorInfo
	earlyMutatorInfo    []*mutatorInfo
	variantMutatorNames []string

	depsModified uint32 // positive if a mutator modified the dependencies

	dependenciesReady bool // set to true on a successful ResolveDependencies
	buildActionsReady bool // set to true on a successful PrepareBuildActions

	// set by SetIgnoreUnknownModuleTypes
	ignoreUnknownModuleTypes bool

	// set by SetAllowMissingDependencies
	allowMissingDependencies bool

	// set during PrepareBuildActions
	pkgNames        map[*packageContext]string
	liveGlobals     *liveTracker
	globalVariables map[Variable]*ninjaString
	globalPools     map[Pool]*poolDef
	globalRules     map[Rule]*ruleDef

	// set during PrepareBuildActions
	ninjaBuildDir      *ninjaString // The builddir special Ninja variable
	requiredNinjaMajor int          // For the ninja_required_version variable
	requiredNinjaMinor int          // For the ninja_required_version variable
	requiredNinjaMicro int          // For the ninja_required_version variable

	// set lazily by sortedModuleGroups
	cachedSortedModuleGroups []*moduleGroup

	globs    map[string]GlobPath
	globLock sync.Mutex

	fs             pathtools.FileSystem
	moduleListFile string
}
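
// The four phases in order, as a minimal usage sketch.  The method names come
// from the table above; the arguments ("Blueprints", config, w) and the exact
// signatures of ResolveDependencies, PrepareBuildActions and WriteBuildFile are
// assumptions for illustration and may differ between Blueprint versions.
// Return values (deps and errs) are omitted here for brevity.
//
//   ctx := blueprint.NewContext()
//   ctx.RegisterModuleType("my_module", NewMyModule)          // 1. Registration
//   ctx.RegisterSingletonType("my_singleton", NewMySingleton)
//   ctx.ParseBlueprintsFiles("Blueprints")                    // 2. Parse
//   ctx.ResolveDependencies(config)                           // 3. Generate
//   ctx.PrepareBuildActions(config)
//   ctx.WriteBuildFile(w)                                     // 4. Write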

// A BlueprintError describes a problem that was encountered that is related to
// a particular location in a Blueprints file.
type BlueprintError struct {
	Err error            // the error that occurred
	Pos scanner.Position // the relevant Blueprints file location
}

// A ModuleError describes a problem that was encountered that is related to a
// particular module in a Blueprints file.
type ModuleError struct {
	BlueprintError
	module *moduleInfo
}

// A PropertyError describes a problem that was encountered that is related to a
// particular property in a Blueprints file.
type PropertyError struct {
	ModuleError
	property string
}

func (e *BlueprintError) Error() string {
	return fmt.Sprintf("%s: %s", e.Pos, e.Err)
}

func (e *ModuleError) Error() string {
	return fmt.Sprintf("%s: %s: %s", e.Pos, e.module, e.Err)
}

func (e *PropertyError) Error() string {
	return fmt.Sprintf("%s: %s: %s: %s", e.Pos, e.module, e.property, e.Err)
}

type localBuildActions struct {
	variables []*localVariable
	rules     []*localRule
	buildDefs []*buildDef
}

type moduleGroup struct {
	name      string
	ninjaName string

	modules []*moduleInfo

	namespace Namespace
}

type moduleInfo struct {
	// set during Parse
	typeName          string
	factory           ModuleFactory
	relBlueprintsFile string
	pos               scanner.Position
	propertyPos       map[string]scanner.Position

	variantName       string
	variant           variationMap
	dependencyVariant variationMap

	logicModule Module
	group       *moduleGroup
	properties  []interface{}

	// set during ResolveDependencies
	directDeps  []depInfo
	missingDeps []string

	// set during updateDependencies
	reverseDeps []*moduleInfo
	forwardDeps []*moduleInfo

	// used by parallelVisitAllBottomUp
	waitingCount int

	// set during each runMutator
	splitModules []*moduleInfo

	// set during PrepareBuildActions
	actionDefs localBuildActions
}

type depInfo struct {
	module *moduleInfo
	tag    DependencyTag
}

func (module *moduleInfo) Name() string {
	return module.group.name
}

func (module *moduleInfo) String() string {
	s := fmt.Sprintf("module %q", module.Name())
	if module.variantName != "" {
		s += fmt.Sprintf(" variant %q", module.variantName)
	}
	return s
}

func (module *moduleInfo) namespace() Namespace {
	return module.group.namespace
}

// A Variation is a way that a variant of a module differs from other variants of the same module.
// For example, two variants of the same module might have Variation{"arch","arm"} and
// Variation{"arch","arm64"}.
type Variation struct {
	// Mutator is the axis on which this variation applies, i.e. "arch" or "link"
	Mutator string
	// Variation is the name of the variation on the axis, i.e. "arm" or "arm64" for arch, or
	// "shared" or "static" for link.
	Variation string
}

// A variationMap stores a map of Mutator to Variation to specify a variant of a module.
type variationMap map[string]string

func (vm variationMap) clone() variationMap {
	newVm := make(variationMap)
	for k, v := range vm {
		newVm[k] = v
	}

	return newVm
}

// Compare this variationMap to another one.  Returns true if every entry in this map
// is either the same in the other map or doesn't exist in the other map.
func (vm variationMap) subset(other variationMap) bool {
	for k, v1 := range vm {
		if v2, ok := other[k]; ok && v1 != v2 {
			return false
		}
	}
	return true
}

func (vm variationMap) equal(other variationMap) bool {
	return reflect.DeepEqual(vm, other)
}
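
// For illustration (a sketch grounded in the definitions above, with
// hypothetical mutator and variation names):
//
//   a := variationMap{"arch": "arm", "link": "shared"}
//   b := variationMap{"arch": "arm"}
//
// a.subset(b) and b.subset(a) are both true, because no key present in both
// maps has a conflicting value, while a.equal(b) is false because the "link"
// entry exists only in a.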

type singletonInfo struct {
	// set during RegisterSingletonType
	factory   SingletonFactory
	singleton Singleton
	name      string

	// set during PrepareBuildActions
	actionDefs localBuildActions
}

type mutatorInfo struct {
	// set during RegisterMutator
	topDownMutator  TopDownMutator
	bottomUpMutator BottomUpMutator
	name            string
	parallel        bool
}

func newContext() *Context {
	return &Context{
		moduleFactories:    make(map[string]ModuleFactory),
		nameInterface:      NewSimpleNameInterface(),
		moduleInfo:         make(map[Module]*moduleInfo),
		globs:              make(map[string]GlobPath),
		fs:                 pathtools.OsFs,
		ninjaBuildDir:      nil,
		requiredNinjaMajor: 1,
		requiredNinjaMinor: 7,
		requiredNinjaMicro: 0,
	}
}

// NewContext creates a new Context object.  The created context initially has
// no module or singleton factories registered, so the RegisterModuleType and
// RegisterSingletonType methods must be called before it can do anything
// useful.
func NewContext() *Context {
	ctx := newContext()

	ctx.RegisterBottomUpMutator("blueprint_deps", blueprintDepsMutator)

	return ctx
}

// A ModuleFactory function creates a new Module object.  See the
// Context.RegisterModuleType method for details about how a registered
// ModuleFactory is used by a Context.
type ModuleFactory func() (m Module, propertyStructs []interface{})

// RegisterModuleType associates a module type name (which can appear in a
// Blueprints file) with a Module factory function.  When the given module type
// name is encountered in a Blueprints file during parsing, the Module factory
// is invoked to instantiate a new Module object to handle the build action
// generation for the module.  If a Mutator splits a module into multiple variants,
// the factory is invoked again to create a new Module for each variant.
//
// The module type names given here must be unique for the context.  The factory
// function should be a named function so that its package and name can be
// included in the generated Ninja file for debugging purposes.
//
// The factory function returns two values.  The first is the newly created
// Module object.  The second is a slice of pointers to that Module object's
// properties structs.  Each properties struct is examined when parsing a module
// definition of this type in a Blueprints file.  Exported fields of the
// properties structs are automatically set to the property values specified in
// the Blueprints file.  The properties struct field names determine the name of
// the Blueprints file properties that are used - the Blueprints property name
// matches that of the properties struct field name with the first letter
// converted to lower-case.
//
// The fields of the properties struct must be either []string, a string, or
// bool.  The Context will panic if a Module gets instantiated with a properties
// struct containing a field that is not one of these supported types.
//
// Any properties that appear in the Blueprints files that are not built-in
// module properties (such as "name" and "deps") and do not have a corresponding
// field in the returned module properties struct result in an error during the
// Context's parse phase.
//
// As an example, the following code:
//
//   type myModule struct {
//       properties struct {
//           Foo string
//           Bar []string
//       }
//   }
//
//   func NewMyModule() (blueprint.Module, []interface{}) {
//       module := new(myModule)
//       properties := &module.properties
//       return module, []interface{}{properties}
//   }
//
//   func main() {
//       ctx := blueprint.NewContext()
//       ctx.RegisterModuleType("my_module", NewMyModule)
//       // ...
//   }
//
// would support parsing a module defined in a Blueprints file as follows:
//
//   my_module {
//       name: "myName",
//       foo:  "my foo string",
//       bar:  ["my", "bar", "strings"],
//   }
//
// The factory function may be called from multiple goroutines.  Any accesses
// to global variables must be synchronized.
func (c *Context) RegisterModuleType(name string, factory ModuleFactory) {
	if _, present := c.moduleFactories[name]; present {
		panic(errors.New("module type name is already registered"))
	}
	c.moduleFactories[name] = factory
}

// A SingletonFactory function creates a new Singleton object.  See the
// Context.RegisterSingletonType method for details about how a registered
// SingletonFactory is used by a Context.
type SingletonFactory func() Singleton

// RegisterSingletonType registers a singleton type that will be invoked to
// generate build actions.  Each registered singleton type is instantiated and
// invoked exactly once as part of the generate phase.  Each registered
// singleton is invoked in registration order.
//
// The singleton type names given here must be unique for the context.  The
// factory function should be a named function so that its package and name can
// be included in the generated Ninja file for debugging purposes.
func (c *Context) RegisterSingletonType(name string, factory SingletonFactory) {
	for _, s := range c.singletonInfo {
		if s.name == name {
			panic(errors.New("singleton name is already registered"))
		}
	}

	c.singletonInfo = append(c.singletonInfo, &singletonInfo{
		factory:   factory,
		singleton: factory(),
		name:      name,
	})
}
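
// A minimal sketch of defining and registering a singleton.  The Singleton
// interface method shown here (GenerateBuildActions taking a SingletonContext)
// is assumed from the rest of the package and may differ between versions; the
// type and names are hypothetical.
//
//   type cleanSingleton struct{}
//
//   func newCleanSingleton() blueprint.Singleton {
//       return &cleanSingleton{}
//   }
//
//   func (s *cleanSingleton) GenerateBuildActions(ctx blueprint.SingletonContext) {
//       // emit context-wide rules and build statements here
//   }
//
//   ctx.RegisterSingletonType("clean", newCleanSingleton)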

// RegisterPreSingletonType registers a presingleton type that will be invoked to
// generate build actions before any Blueprint files have been read.  Each registered
// presingleton type is instantiated and invoked exactly once at the beginning of the
// parse phase.  Each registered presingleton is invoked in registration order.
//
// The presingleton type names given here must be unique for the context.  The
// factory function should be a named function so that its package and name can
// be included in the generated Ninja file for debugging purposes.
func (c *Context) RegisterPreSingletonType(name string, factory SingletonFactory) {
	for _, s := range c.preSingletonInfo {
		if s.name == name {
			panic(errors.New("presingleton name is already registered"))
		}
	}

	c.preSingletonInfo = append(c.preSingletonInfo, &singletonInfo{
		factory:   factory,
		singleton: factory(),
		name:      name,
	})
}

func (c *Context) SetNameInterface(i NameInterface) {
	c.nameInterface = i
}

func singletonPkgPath(singleton Singleton) string {
	typ := reflect.TypeOf(singleton)
	for typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	return typ.PkgPath()
}

func singletonTypeName(singleton Singleton) string {
	typ := reflect.TypeOf(singleton)
	for typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	return typ.PkgPath() + "." + typ.Name()
}

// RegisterTopDownMutator registers a mutator that will be invoked to propagate dependency info
// top-down between Modules.  Each registered mutator is invoked in registration order (mixing
// TopDownMutators and BottomUpMutators) once per Module, and the invocation on any module will
// have returned before it is invoked on any of its dependencies.
//
// The mutator type names given here must be unique to all top down mutators in
// the Context.
//
// Returns a MutatorHandle, on which Parallel can be called to set the mutator to visit modules in
// parallel while maintaining ordering.
func (c *Context) RegisterTopDownMutator(name string, mutator TopDownMutator) MutatorHandle {
	for _, m := range c.mutatorInfo {
		if m.name == name && m.topDownMutator != nil {
			panic(fmt.Errorf("mutator name %s is already registered", name))
		}
	}

	info := &mutatorInfo{
		topDownMutator: mutator,
		name:           name,
	}

	c.mutatorInfo = append(c.mutatorInfo, info)

	return info
}

// RegisterBottomUpMutator registers a mutator that will be invoked to split Modules into variants.
// Each registered mutator is invoked in registration order (mixing TopDownMutators and
// BottomUpMutators) once per Module, and will not be invoked on a module until the invocations on
// all of the module's dependencies have returned.
//
// The mutator type names given here must be unique to all bottom up or early
// mutators in the Context.
//
// Returns a MutatorHandle, on which Parallel can be called to set the mutator to visit modules in
// parallel while maintaining ordering.
func (c *Context) RegisterBottomUpMutator(name string, mutator BottomUpMutator) MutatorHandle {
	for _, m := range c.variantMutatorNames {
		if m == name {
			panic(fmt.Errorf("mutator name %s is already registered", name))
		}
	}

	info := &mutatorInfo{
		bottomUpMutator: mutator,
		name:            name,
	}
	c.mutatorInfo = append(c.mutatorInfo, info)

	c.variantMutatorNames = append(c.variantMutatorNames, name)

	return info
}

type MutatorHandle interface {
	// Set the mutator to visit modules in parallel while maintaining ordering.  Calling any
	// method on the mutator context is thread-safe, but the mutator must handle synchronization
	// for any modifications to global state or any modules outside the one it was invoked on.
	Parallel() MutatorHandle
}

func (mutator *mutatorInfo) Parallel() MutatorHandle {
	mutator.parallel = true
	return mutator
}
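
// For illustration, a sketch of registering both kinds of mutator.  The mutator
// function body and the BottomUpMutatorContext.CreateVariations call are
// assumptions about the mutator context API and may differ between versions;
// the mutator names are hypothetical.
//
//   ctx.RegisterTopDownMutator("propagate_cflags", propagateCflagsMutator)
//   ctx.RegisterBottomUpMutator("link", linkMutator).Parallel()
//
//   func linkMutator(mctx blueprint.BottomUpMutatorContext) {
//       // split every module into a "static" and a "shared" variant
//       mctx.CreateVariations("static", "shared")
//   }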

// RegisterEarlyMutator registers a mutator that will be invoked to split
// Modules into multiple variant Modules before any dependencies have been
// created.  Each registered mutator is invoked in registration order once
// per Module (including each variant from previous early mutators).  Module
// order is unpredictable.
//
// In order for dependencies to be satisfied in a later pass, all dependencies
// of a module either must have an identical variant or must have no variations.
//
// The mutator type names given here must be unique to all bottom up or early
// mutators in the Context.
//
// Deprecated, use a BottomUpMutator instead.  The only difference between
// EarlyMutator and BottomUpMutator is that EarlyMutator runs before the
// deprecated DynamicDependencies.
func (c *Context) RegisterEarlyMutator(name string, mutator EarlyMutator) {
	for _, m := range c.variantMutatorNames {
		if m == name {
			panic(fmt.Errorf("mutator name %s is already registered", name))
		}
	}

	c.earlyMutatorInfo = append(c.earlyMutatorInfo, &mutatorInfo{
		bottomUpMutator: func(mctx BottomUpMutatorContext) {
			mutator(mctx)
		},
		name: name,
	})

	c.variantMutatorNames = append(c.variantMutatorNames, name)
}

// SetIgnoreUnknownModuleTypes sets the behavior of the context in the case
// where it encounters an unknown module type while parsing Blueprints files.  By
// default, the context will report unknown module types as an error.  If this
// method is called with ignoreUnknownModuleTypes set to true then the context
// will silently ignore unknown module types.
//
// This method should generally not be used.  It exists to facilitate the
// bootstrapping process.
func (c *Context) SetIgnoreUnknownModuleTypes(ignoreUnknownModuleTypes bool) {
	c.ignoreUnknownModuleTypes = ignoreUnknownModuleTypes
}

// SetAllowMissingDependencies changes the behavior of Blueprint to ignore
// unresolved dependencies.  If the module's GenerateBuildActions calls
// ModuleContext.GetMissingDependencies Blueprint will not emit any errors
// for missing dependencies.
func (c *Context) SetAllowMissingDependencies(allowMissingDependencies bool) {
	c.allowMissingDependencies = allowMissingDependencies
}
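
// A sketch of how a module cooperates with SetAllowMissingDependencies.  The
// Module method name (GenerateBuildActions) comes from the comment above; the
// module type and the exact ModuleContext API are assumptions for illustration.
//
//   ctx.SetAllowMissingDependencies(true)
//
//   func (m *myModule) GenerateBuildActions(mctx blueprint.ModuleContext) {
//       if missing := mctx.GetMissingDependencies(); len(missing) > 0 {
//           // Record or stub out the missing dependencies instead of failing;
//           // because GetMissingDependencies was called, Blueprint will not
//           // report them as errors.
//           return
//       }
//       // normal build action generation...
//   }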

func (c *Context) SetModuleListFile(listFile string) {
	c.moduleListFile = listFile
}

func (c *Context) ListModulePaths(baseDir string) (paths []string, err error) {
	reader, err := c.fs.Open(c.moduleListFile)
	if err != nil {
		return nil, err
	}
	bytes, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	text := string(bytes)

	text = strings.Trim(text, "\n")
	lines := strings.Split(text, "\n")
	for i := range lines {
		lines[i] = filepath.Join(baseDir, lines[i])
	}

	return lines, nil
}
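
// The module list file is a plain newline-separated list of Blueprints file
// paths relative to baseDir.  For example (hypothetical paths), given a list
// file containing:
//
//   Blueprints
//   art/Blueprints
//   bionic/Blueprints
//
// ListModulePaths("src") returns
// []string{"src/Blueprints", "src/art/Blueprints", "src/bionic/Blueprints"}.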

// a fileParseContext tells the status of parsing a particular file
type fileParseContext struct {
	// name of file
	fileName string

	// scope to use when resolving variables
	Scope *parser.Scope

	// pointer to the one in the parent directory
	parent *fileParseContext

	// is closed once FileHandler has completed for this file
	doneVisiting chan struct{}
}

func (c *Context) ParseBlueprintsFiles(rootFile string) (deps []string, errs []error) {
	baseDir := filepath.Dir(rootFile)
	pathsToParse, err := c.ListModulePaths(baseDir)
	if err != nil {
		return nil, []error{err}
	}
	return c.ParseFileList(baseDir, pathsToParse)
}

// ParseBlueprintsFiles parses a set of Blueprints files starting with the file
// at rootFile.  When it encounters a Blueprints file with a set of subdirs
// listed it recursively parses any Blueprints files found in those
// subdirectories.
//
// If no errors are encountered while parsing the files, the list of paths on
// which the future output will depend is returned.  This list will include both
// Blueprints file paths as well as directory paths for cases where wildcard
// subdirs are found.
func (c *Context) ParseFileList(rootDir string, filePaths []string) (deps []string,
	errs []error) {

	if len(filePaths) < 1 {
		return nil, []error{fmt.Errorf("no paths provided to parse")}
	}

	c.dependenciesReady = false

	moduleCh := make(chan *moduleInfo)
	errsCh := make(chan []error)
	doneCh := make(chan struct{})
	var numErrs uint32
	var numGoroutines int32

	// handler must be reentrant
	handleOneFile := func(file *parser.File) {
		if atomic.LoadUint32(&numErrs) > maxErrors {
			return
		}

		for _, def := range file.Defs {
			var module *moduleInfo
			var errs []error
			switch def := def.(type) {
			case *parser.Module:
				module, errs = c.processModuleDef(def, file.Name)
			case *parser.Assignment:
				// Already handled via Scope object
			default:
				panic("unknown definition type")
			}

			if len(errs) > 0 {
				atomic.AddUint32(&numErrs, uint32(len(errs)))
				errsCh <- errs
			} else if module != nil {
				moduleCh <- module
			}
		}
	}

	atomic.AddInt32(&numGoroutines, 1)
	go func() {
		var errs []error
		deps, errs = c.WalkBlueprintsFiles(rootDir, filePaths, handleOneFile)
		if len(errs) > 0 {
			errsCh <- errs
		}
		doneCh <- struct{}{}
	}()

loop:
	for {
		select {
		case newErrs := <-errsCh:
			errs = append(errs, newErrs...)
		case module := <-moduleCh:
			newErrs := c.addModule(module)
			if len(newErrs) > 0 {
				errs = append(errs, newErrs...)
			}
		case <-doneCh:
			n := atomic.AddInt32(&numGoroutines, -1)
			if n == 0 {
				break loop
			}
		}
	}

	return deps, errs
}

type FileHandler func(*parser.File)

// WalkBlueprintsFiles walks a set of Blueprints files starting with the given filepaths,
// calling the given file handler on each.
//
// When WalkBlueprintsFiles encounters a Blueprints file with a set of subdirs listed,
// it recursively parses any Blueprints files found in those subdirectories.
//
// If any of the file paths is an ancestor directory of any other file path, the ancestor
// will be parsed and visited first.
//
// The file handler will be called from a goroutine, so it must be reentrant.
//
// If no errors are encountered while parsing the files, the list of paths on
// which the future output will depend is returned.  This list will include both
// Blueprints file paths as well as directory paths for cases where wildcard
// subdirs are found.
//
// visitor will be called asynchronously, and will only be called once the visitor for each
// ancestor directory has completed.
//
// WalkBlueprintsFiles will not return until all calls to visitor have returned.
func (c *Context) WalkBlueprintsFiles(rootDir string, filePaths []string,
	visitor FileHandler) (deps []string, errs []error) {

	// make a mapping from ancestors to their descendants to facilitate parsing ancestors first
	descendantsMap, err := findBlueprintDescendants(filePaths)
	if err != nil {
		panic(err.Error())
		return nil, []error{err}
	}
	blueprintsSet := make(map[string]bool)

	// Channels to receive data back from openAndParse goroutines
	blueprintsCh := make(chan fileParseContext)
	errsCh := make(chan []error)
	depsCh := make(chan string)

	// Channel to notify main loop that an openAndParse goroutine has finished
	doneParsingCh := make(chan fileParseContext)

	// Number of outstanding goroutines to wait for
	activeCount := 0
	var pending []fileParseContext
	tooManyErrors := false

	// Limit concurrent calls to parseBlueprintFiles to 200
	// Darwin has a default limit of 256 open files
	maxActiveCount := 200

	// count the number of pending calls to visitor()
	visitorWaitGroup := sync.WaitGroup{}

	startParseBlueprintsFile := func(blueprint fileParseContext) {
		if blueprintsSet[blueprint.fileName] {
			return
		}
		blueprintsSet[blueprint.fileName] = true
		activeCount++
		deps = append(deps, blueprint.fileName)
		visitorWaitGroup.Add(1)
		go func() {
			file, blueprints, deps, errs := c.openAndParse(blueprint.fileName, blueprint.Scope, rootDir,
				&blueprint)
			if len(errs) > 0 {
				errsCh <- errs
			}
			for _, blueprint := range blueprints {
				blueprintsCh <- blueprint
			}
			for _, dep := range deps {
				depsCh <- dep
			}
			doneParsingCh <- blueprint

			if blueprint.parent != nil && blueprint.parent.doneVisiting != nil {
				// wait for visitor() of parent to complete
				<-blueprint.parent.doneVisiting
			}

			if len(errs) == 0 {
				// process this file
				visitor(file)
			}
			if blueprint.doneVisiting != nil {
				close(blueprint.doneVisiting)
			}
			visitorWaitGroup.Done()
		}()
	}

	foundParseableBlueprint := func(blueprint fileParseContext) {
		if activeCount >= maxActiveCount {
			pending = append(pending, blueprint)
		} else {
			startParseBlueprintsFile(blueprint)
		}
	}

	startParseDescendants := func(blueprint fileParseContext) {
		descendants, hasDescendants := descendantsMap[blueprint.fileName]
		if hasDescendants {
			for _, descendant := range descendants {
				foundParseableBlueprint(fileParseContext{descendant, parser.NewScope(blueprint.Scope), &blueprint, make(chan struct{})})
			}
		}
	}

	// begin parsing any files that have no ancestors
	startParseDescendants(fileParseContext{"", parser.NewScope(nil), nil, nil})

loop:
	for {
		if len(errs) > maxErrors {
			tooManyErrors = true
		}

		select {
		case newErrs := <-errsCh:
			errs = append(errs, newErrs...)
		case dep := <-depsCh:
			deps = append(deps, dep)
		case blueprint := <-blueprintsCh:
			if tooManyErrors {
				continue
			}
			foundParseableBlueprint(blueprint)
		case blueprint := <-doneParsingCh:
			activeCount--
			if !tooManyErrors {
				startParseDescendants(blueprint)
			}
			if activeCount < maxActiveCount && len(pending) > 0 {
				// start to process the next one from the queue
				next := pending[len(pending)-1]
				pending = pending[:len(pending)-1]
				startParseBlueprintsFile(next)
			}
			if activeCount == 0 {
				break loop
			}
		}
	}

	sort.Strings(deps)

	// wait for every visitor() to complete
	visitorWaitGroup.Wait()

	return
}

// MockFileSystem causes the Context to replace all reads with accesses to the provided map of
// filenames to contents stored as a byte slice.
func (c *Context) MockFileSystem(files map[string][]byte) {
	// look for a module list file
	_, ok := files[MockModuleListFile]
	if !ok {
		// no module list file specified; find every file named Blueprints
		pathsToParse := []string{}
		for candidate := range files {
			if filepath.Base(candidate) == "Blueprints" {
				pathsToParse = append(pathsToParse, candidate)
			}
		}
		if len(pathsToParse) < 1 {
			panic(fmt.Sprintf("No Blueprints files found in mock filesystem: %v\n", files))
		}
		// put the list of Blueprints files into a list file
		files[MockModuleListFile] = []byte(strings.Join(pathsToParse, "\n"))
	}
	c.SetModuleListFile(MockModuleListFile)

	// mock the filesystem
	c.fs = pathtools.MockFs(files)
}
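
// A sketch of using MockFileSystem in a test.  The module type used in the
// mock Blueprints file is hypothetical; error handling is elided.
//
//   ctx := blueprint.NewContext()
//   ctx.RegisterModuleType("my_module", NewMyModule)
//   ctx.MockFileSystem(map[string][]byte{
//       "Blueprints": []byte(`
//           my_module {
//               name: "foo",
//           }
//       `),
//   })
//   ctx.ParseBlueprintsFiles("Blueprints")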

// openAndParse opens and parses a single Blueprints file, and returns the results
func (c *Context) openAndParse(filename string, scope *parser.Scope, rootDir string,
	parent *fileParseContext) (file *parser.File,
	subBlueprints []fileParseContext, deps []string, errs []error) {

	f, err := c.fs.Open(filename)
	if err != nil {
		// couldn't open the file; see if we can provide a clearer error than "could not open file"
		stats, statErr := c.fs.Lstat(filename)
		if statErr == nil {
			isSymlink := stats.Mode()&os.ModeSymlink != 0
			if isSymlink {
				err = fmt.Errorf("could not open symlink %v : %v", filename, err)
				target, readlinkErr := os.Readlink(filename)
				if readlinkErr == nil {
					_, targetStatsErr := c.fs.Lstat(target)
					if targetStatsErr != nil {
						err = fmt.Errorf("could not open symlink %v; its target (%v) cannot be opened", filename, target)
					}
				}
			} else {
				err = fmt.Errorf("%v exists but could not be opened: %v", filename, err)
			}
		}
		return nil, nil, nil, []error{err}
	}

	func() {
		defer func() {
			err = f.Close()
			if err != nil {
				errs = append(errs, err)
			}
		}()
		file, subBlueprints, errs = c.parseOne(rootDir, filename, f, scope, parent)
	}()

	if len(errs) > 0 {
		return nil, nil, nil, errs
	}

	for _, b := range subBlueprints {
		deps = append(deps, b.fileName)
	}

	return file, subBlueprints, deps, nil
}

// parseOne parses a single Blueprints file from the given reader, creating Module
// objects for each of the module definitions encountered.  If the Blueprints
// file contains an assignment to the "subdirs" variable, then the
// subdirectories listed are searched for Blueprints files returned in the
// subBlueprints return value.  If the Blueprints file contains an assignment
// to the "build" variable, then the files listed are returned in the
// subBlueprints return value.
//
// rootDir specifies the path to the root directory of the source tree, while
// filename specifies the path to the Blueprints file.  These paths are used for
// error reporting and for determining the module's directory.
func (c *Context) parseOne(rootDir, filename string, reader io.Reader,
	scope *parser.Scope, parent *fileParseContext) (file *parser.File, subBlueprints []fileParseContext, errs []error) {

	relBlueprintsFile, err := filepath.Rel(rootDir, filename)
	if err != nil {
		return nil, nil, []error{err}
	}

	scope.Remove("subdirs")
	scope.Remove("optional_subdirs")
	scope.Remove("build")
	file, errs = parser.ParseAndEval(filename, reader, scope)
	if len(errs) > 0 {
		for i, err := range errs {
			if parseErr, ok := err.(*parser.ParseError); ok {
				err = &BlueprintError{
					Err: parseErr.Err,
					Pos: parseErr.Pos,
				}
				errs[i] = err
			}
		}

		// If there were any parse errors don't bother trying to interpret the
		// result.
		return nil, nil, errs
	}
	file.Name = relBlueprintsFile

	build, buildPos, err := getLocalStringListFromScope(scope, "build")
	if err != nil {
		errs = append(errs, err)
	}
	for _, buildEntry := range build {
		if strings.Contains(buildEntry, "/") {
			errs = append(errs, &BlueprintError{
				Err: fmt.Errorf("illegal value %v. The '/' character is not permitted", buildEntry),
				Pos: buildPos,
			})
		}
	}

	subBlueprintsName, _, err := getStringFromScope(scope, "subname")
	if err != nil {
		errs = append(errs, err)
	}

	if subBlueprintsName == "" {
		subBlueprintsName = "Blueprints"
	}

	var blueprints []string

	newBlueprints, newErrs := c.findBuildBlueprints(filepath.Dir(filename), build, buildPos)
	blueprints = append(blueprints, newBlueprints...)
	errs = append(errs, newErrs...)

	subBlueprintsAndScope := make([]fileParseContext, len(blueprints))
	for i, b := range blueprints {
		subBlueprintsAndScope[i] = fileParseContext{b, parser.NewScope(scope), parent, make(chan struct{})}
	}
	return file, subBlueprintsAndScope, errs
}
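
// For illustration, a Blueprints file can pull additional Blueprints files from
// its own directory into the parse via a "build" assignment (a sketch; the file
// names are hypothetical, and entries may not contain the '/' character).  The
// optional "subname" assignment overrides the "Blueprints" file name used when
// searching subdirectories.
//
//   build = ["Extra.bp", "Generated.bp"]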

func (c *Context) findBuildBlueprints(dir string, build []string,
	buildPos scanner.Position) ([]string, []error) {

	var blueprints []string
	var errs []error

	for _, file := range build {
		pattern := filepath.Join(dir, file)
		var matches []string
		var err error

		matches, err = c.glob(pattern, nil)

		if err != nil {
			errs = append(errs, &BlueprintError{
				Err: fmt.Errorf("%q: %s", pattern, err.Error()),
				Pos: buildPos,
			})
			continue
		}

		if len(matches) == 0 {
			errs = append(errs, &BlueprintError{
				Err: fmt.Errorf("%q: not found", pattern),
				Pos: buildPos,
			})
		}

		for _, foundBlueprints := range matches {
			blueprints = append(blueprints, foundBlueprints)
		}
	}

	return blueprints, errs
}

func (c *Context) findSubdirBlueprints(dir string, subdirs []string, subdirsPos scanner.Position,
	subBlueprintsName string, optional bool) ([]string, []error) {

	var blueprints []string
	var errs []error

	for _, subdir := range subdirs {
		pattern := filepath.Join(dir, subdir, subBlueprintsName)
		var matches []string
		var err error

		matches, err = c.glob(pattern, nil)

		if err != nil {
			errs = append(errs, &BlueprintError{
				Err: fmt.Errorf("%q: %s", pattern, err.Error()),
				Pos: subdirsPos,
			})
			continue
		}

		if len(matches) == 0 && !optional {
			errs = append(errs, &BlueprintError{
				Err: fmt.Errorf("%q: not found", pattern),
				Pos: subdirsPos,
			})
		}

		for _, subBlueprints := range matches {
			blueprints = append(blueprints, subBlueprints)
		}
	}

	return blueprints, errs
}

func getLocalStringListFromScope(scope *parser.Scope, v string) ([]string, scanner.Position, error) {
	if assignment, local := scope.Get(v); assignment == nil || !local {
		return nil, scanner.Position{}, nil
	} else {
		switch value := assignment.Value.Eval().(type) {
		case *parser.List:
			ret := make([]string, 0, len(value.Values))

			for _, listValue := range value.Values {
				s, ok := listValue.(*parser.String)
				if !ok {
					// The parser should not produce this.
					panic("non-string value found in list")
				}

				ret = append(ret, s.Value)
			}

			return ret, assignment.EqualsPos, nil
		case *parser.Bool, *parser.String:
			return nil, scanner.Position{}, &BlueprintError{
				Err: fmt.Errorf("%q must be a list of strings", v),
				Pos: assignment.EqualsPos,
			}
		default:
			panic(fmt.Errorf("unknown value type: %d", assignment.Value.Type))
		}
	}
}

func getStringFromScope(scope *parser.Scope, v string) (string, scanner.Position, error) {
	if assignment, _ := scope.Get(v); assignment == nil {
		return "", scanner.Position{}, nil
	} else {
		switch value := assignment.Value.Eval().(type) {
		case *parser.String:
			return value.Value, assignment.EqualsPos, nil
		case *parser.Bool, *parser.List:
			return "", scanner.Position{}, &BlueprintError{
				Err: fmt.Errorf("%q must be a string", v),
				Pos: assignment.EqualsPos,
			}
		default:
			panic(fmt.Errorf("unknown value type: %d", assignment.Value.Type))
		}
	}
}

// Clones a build logic module by calling the factory method for its module type, and then cloning
// property values.  Any values stored in the module object that are not stored in properties
// structs will be lost.
func (c *Context) cloneLogicModule(origModule *moduleInfo) (Module, []interface{}) {
	newLogicModule, newProperties := origModule.factory()

	if len(newProperties) != len(origModule.properties) {
		panic("mismatched properties array length in " + origModule.Name())
	}

	for i := range newProperties {
		dst := reflect.ValueOf(newProperties[i]).Elem()
		src := reflect.ValueOf(origModule.properties[i]).Elem()

		proptools.CopyProperties(dst, src)
	}

	return newLogicModule, newProperties
}

func (c *Context) createVariations(origModule *moduleInfo, mutatorName string,
	variationNames []string) ([]*moduleInfo, []error) {

	if len(variationNames) == 0 {
		panic(fmt.Errorf("mutator %q passed zero-length variation list for module %q",
			mutatorName, origModule.Name()))
	}

	newModules := []*moduleInfo{}

	var errs []error

	for i, variationName := range variationNames {
		var newLogicModule Module
		var newProperties []interface{}

		if i == 0 {
			// Reuse the existing module for the first new variant
			// This both saves creating a new module, and causes the insertion in c.moduleInfo below
			// with logicModule as the key to replace the original entry in c.moduleInfo
			newLogicModule, newProperties = origModule.logicModule, origModule.properties
		} else {
			newLogicModule, newProperties = c.cloneLogicModule(origModule)
		}

		newVariant := origModule.variant.clone()
		newVariant[mutatorName] = variationName

		m := *origModule
		newModule := &m
		newModule.directDeps = append([]depInfo{}, origModule.directDeps...)
		newModule.logicModule = newLogicModule
		newModule.variant = newVariant
		newModule.dependencyVariant = origModule.dependencyVariant.clone()
		newModule.properties = newProperties

		if variationName != "" {
			if newModule.variantName == "" {
				newModule.variantName = variationName
			} else {
				newModule.variantName += "_" + variationName
			}
		}

		newModules = append(newModules, newModule)

		newErrs := c.convertDepsToVariation(newModule, mutatorName, variationName)
		if len(newErrs) > 0 {
			errs = append(errs, newErrs...)
		}
	}

	// Mark original variant as invalid.  Modules that depend on this module will still
	// depend on origModule, but we'll fix it when the mutator is called on them.
	origModule.logicModule = nil
	origModule.splitModules = newModules

	atomic.AddUint32(&c.depsModified, 1)

	return newModules, errs
}

func (c *Context) convertDepsToVariation(module *moduleInfo,
	mutatorName, variationName string) (errs []error) {

	for i, dep := range module.directDeps {
		if dep.module.logicModule == nil {
			var newDep *moduleInfo
			for _, m := range dep.module.splitModules {
				if m.variant[mutatorName] == variationName {
					newDep = m
					break
				}
			}
			if newDep == nil {
				errs = append(errs, &BlueprintError{
					Err: fmt.Errorf("failed to find variation %q for module %q needed by %q",
						variationName, dep.module.Name(), module.Name()),
					Pos: module.pos,
				})
				continue
			}
			module.directDeps[i].module = newDep
		}
	}

	return errs
}
|
|
|
|
|
2015-03-14 00:02:36 +01:00
|
|
|
func (c *Context) prettyPrintVariant(variant variationMap) string {
|
2015-03-11 04:08:19 +01:00
|
|
|
names := make([]string, 0, len(variant))
|
|
|
|
for _, m := range c.variantMutatorNames {
|
|
|
|
if v, ok := variant[m]; ok {
|
|
|
|
names = append(names, m+":"+v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return strings.Join(names, ", ")
|
|
|
|
}
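// Illustrative sketch (not part of the original source): prettyPrintVariant
// emits "mutator:variation" pairs in mutator registration order, skipping
// mutators that did not split the module. Assuming variationMap maps mutator
// name to variation and variantMutatorNames is ["arch", "link"]:
//
//	v := variationMap{"link": "shared", "arch": "arm"}
//	s := c.prettyPrintVariant(v)
//	// s == "arch:arm, link:shared" (registration order, not map order)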
|
|
|
|
|
2017-07-28 23:32:36 +02:00
|
|
|
func (c *Context) newModule(factory ModuleFactory) *moduleInfo {
|
|
|
|
logicModule, properties := factory()
|
|
|
|
|
|
|
|
module := &moduleInfo{
|
|
|
|
logicModule: logicModule,
|
|
|
|
factory: factory,
|
|
|
|
}
|
|
|
|
|
|
|
|
module.properties = properties
|
|
|
|
|
|
|
|
return module
|
|
|
|
}
|
|
|
|
|
2014-05-28 01:34:41 +02:00
|
|
|
func (c *Context) processModuleDef(moduleDef *parser.Module,
|
2015-01-08 01:22:45 +01:00
|
|
|
relBlueprintsFile string) (*moduleInfo, []error) {
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2016-06-10 00:52:30 +02:00
|
|
|
factory, ok := c.moduleFactories[moduleDef.Type]
|
2014-05-28 01:34:41 +02:00
|
|
|
if !ok {
|
|
|
|
if c.ignoreUnknownModuleTypes {
|
2015-01-08 01:22:45 +01:00
|
|
|
return nil, nil
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2015-01-08 01:22:45 +01:00
|
|
|
return nil, []error{
|
2016-10-08 02:13:10 +02:00
|
|
|
&BlueprintError{
|
2016-06-10 00:52:30 +02:00
|
|
|
Err: fmt.Errorf("unrecognized module type %q", moduleDef.Type),
|
|
|
|
Pos: moduleDef.TypePos,
|
2014-06-23 02:02:55 +02:00
|
|
|
},
|
|
|
|
}
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2017-07-28 23:32:36 +02:00
|
|
|
module := c.newModule(factory)
|
|
|
|
module.typeName = moduleDef.Type
|
2015-03-11 08:57:25 +01:00
|
|
|
|
2017-07-28 23:32:36 +02:00
|
|
|
module.relBlueprintsFile = relBlueprintsFile
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2017-07-28 23:32:36 +02:00
|
|
|
propertyMap, errs := unpackProperties(moduleDef.Properties, module.properties...)
|
2014-05-28 01:34:41 +02:00
|
|
|
if len(errs) > 0 {
|
2015-01-08 01:22:45 +01:00
|
|
|
return nil, errs
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2016-06-10 00:52:30 +02:00
|
|
|
module.pos = moduleDef.TypePos
|
2015-03-11 08:57:25 +01:00
|
|
|
module.propertyPos = make(map[string]scanner.Position)
|
2014-09-30 20:38:25 +02:00
|
|
|
for name, propertyDef := range propertyMap {
|
2016-06-10 02:03:57 +02:00
|
|
|
module.propertyPos[name] = propertyDef.ColonPos
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2015-01-08 01:22:45 +01:00
|
|
|
return module, nil
|
|
|
|
}
|
2014-12-18 01:12:41 +01:00
|
|
|
|
2015-07-01 01:05:22 +02:00
|
|
|
func (c *Context) addModule(module *moduleInfo) []error {
|
2016-05-17 23:58:05 +02:00
|
|
|
name := module.logicModule.Name()
|
2015-07-01 01:05:22 +02:00
|
|
|
c.moduleInfo[module.logicModule] = module
|
2015-03-11 08:57:25 +01:00
|
|
|
|
2016-05-17 23:58:05 +02:00
|
|
|
group := &moduleGroup{
|
2017-12-02 02:10:52 +01:00
|
|
|
name: name,
|
|
|
|
modules: []*moduleInfo{module},
|
2015-01-08 01:22:45 +01:00
|
|
|
}
|
2016-05-17 23:58:05 +02:00
|
|
|
module.group = group
|
2017-11-11 00:12:08 +01:00
|
|
|
namespace, errs := c.nameInterface.NewModule(
|
2017-12-02 02:10:52 +01:00
|
|
|
newNamespaceContext(module),
|
2017-11-11 00:12:08 +01:00
|
|
|
ModuleGroup{moduleGroup: group},
|
|
|
|
module.logicModule)
|
|
|
|
if len(errs) > 0 {
|
|
|
|
for i := range errs {
|
|
|
|
errs[i] = &BlueprintError{Err: errs[i], Pos: module.pos}
|
|
|
|
}
|
|
|
|
return errs
|
|
|
|
}
|
|
|
|
group.namespace = namespace
|
|
|
|
|
2016-05-17 23:58:05 +02:00
|
|
|
c.moduleGroups = append(c.moduleGroups, group)
|
2015-01-08 01:22:45 +01:00
|
|
|
|
2015-07-01 01:05:22 +02:00
|
|
|
return nil
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2014-06-13 05:06:50 +02:00
|
|
|
// ResolveDependencies checks that the dependencies specified by all of the
|
|
|
|
// modules defined in the parsed Blueprints files are valid. This means that
|
|
|
|
// the modules depended upon are defined and that no circular dependencies
|
|
|
|
// exist.
|
2017-08-01 02:26:06 +02:00
|
|
|
func (c *Context) ResolveDependencies(config interface{}) (deps []string, errs []error) {
|
2017-11-07 22:29:54 +01:00
|
|
|
c.liveGlobals = newLiveTracker(config)
|
|
|
|
|
|
|
|
deps, errs = c.generateSingletonBuildActions(config, c.preSingletonInfo, c.liveGlobals)
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return nil, errs
|
|
|
|
}
|
|
|
|
|
2017-08-01 02:26:06 +02:00
|
|
|
errs = c.updateDependencies()
|
2016-08-10 21:56:40 +02:00
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2016-08-10 21:56:40 +02:00
|
|
|
}
|
|
|
|
|
2017-11-07 22:29:54 +01:00
|
|
|
mutatorDeps, errs := c.runMutators(config)
|
2015-07-25 01:53:27 +02:00
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2015-07-25 01:53:27 +02:00
|
|
|
}
|
2017-11-07 22:29:54 +01:00
|
|
|
deps = append(deps, mutatorDeps...)
|
2015-07-25 01:53:27 +02:00
|
|
|
|
2016-04-12 00:41:52 +02:00
|
|
|
c.cloneModules()
|
|
|
|
|
2014-05-28 01:34:41 +02:00
|
|
|
c.dependenciesReady = true
|
2017-08-01 02:26:06 +02:00
|
|
|
return deps, nil
|
2014-05-28 01:34:41 +02:00
|
|
|
}
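// Illustrative sketch (not part of the original source): ResolveDependencies
// sits between the parse and generate phases. A minimal caller, assuming the
// usual ParseBlueprintsFiles signature, looks roughly like:
//
//	ctx := NewContext()
//	// RegisterModuleType / RegisterSingletonType calls go here.
//	if _, errs := ctx.ParseBlueprintsFiles("Blueprints"); len(errs) > 0 {
//		// report errs and stop
//	}
//	if _, errs := ctx.ResolveDependencies(config); len(errs) > 0 {
//		// report errs and stop
//	}
//	// PrepareBuildActions calls ResolveDependencies itself if it was skipped.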
|
|
|
|
|
2015-10-29 23:32:56 +01:00
|
|
|
// Default dependency handling. If the module implements the (deprecated)
|
2015-03-11 04:08:19 +01:00
|
|
|
// DynamicDependerModule interface, then its dependency set consists of the union of those
|
2016-05-17 23:58:05 +02:00
|
|
|
// module names returned by its DynamicDependencies method and those added by calling
|
|
|
|
// AddDependency or AddVariationDependencies on DynamicDependencyModuleContext.
|
2015-10-29 23:32:56 +01:00
|
|
|
func blueprintDepsMutator(ctx BottomUpMutatorContext) {
|
|
|
|
if dynamicDepender, ok := ctx.Module().(DynamicDependerModule); ok {
|
2016-01-07 22:43:09 +01:00
|
|
|
func() {
|
|
|
|
defer func() {
|
|
|
|
if r := recover(); r != nil {
|
|
|
|
ctx.error(newPanicErrorf(r, "DynamicDependencies for %s", ctx.moduleInfo()))
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
dynamicDeps := dynamicDepender.DynamicDependencies(ctx)
|
2014-09-25 05:28:11 +02:00
|
|
|
|
2016-01-07 22:43:09 +01:00
|
|
|
if ctx.Failed() {
|
|
|
|
return
|
|
|
|
}
|
2014-09-25 05:28:11 +02:00
|
|
|
|
2016-04-12 00:47:28 +02:00
|
|
|
ctx.AddDependency(ctx.Module(), nil, dynamicDeps...)
|
2016-01-07 22:43:09 +01:00
|
|
|
}()
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
}
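// Illustrative sketch (not part of the original source): a module type opting
// into the deprecated hook handled above. The context type name and the rest
// of the Module implementation are assumptions here:
//
//	type fooModule struct {
//		properties struct {
//			Use_libfoo bool
//		}
//		// Name, GenerateBuildActions, ... omitted
//	}
//
//	func (m *fooModule) DynamicDependencies(ctx DynamicDependerModuleContext) []string {
//		if m.properties.Use_libfoo {
//			return []string{"libfoo"} // added to the module's dependency set
//		}
//		return nil
//	}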
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-07-25 01:53:27 +02:00
|
|
|
// findMatchingVariant searches the moduleGroup for a module with the same variant as module,
|
|
|
|
// and returns the matching module, or nil if one is not found.
|
2016-05-17 23:58:05 +02:00
|
|
|
func (c *Context) findMatchingVariant(module *moduleInfo, possible []*moduleInfo) *moduleInfo {
|
|
|
|
if len(possible) == 1 {
|
|
|
|
return possible[0]
|
2015-07-25 01:53:27 +02:00
|
|
|
} else {
|
2016-05-17 23:58:05 +02:00
|
|
|
for _, m := range possible {
|
2015-07-25 01:53:27 +02:00
|
|
|
if m.variant.equal(module.dependencyVariant) {
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
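// Illustrative sketch (not part of the original source): with a single
// candidate the variant is not checked at all; otherwise the depending
// module's dependencyVariant must match a candidate's variant exactly:
//
//	// module.dependencyVariant == {"arch": "arm"}
//	// possible[0].variant      == {"arch": "arm64"}
//	// possible[1].variant      == {"arch": "arm"}
//	// c.findMatchingVariant(module, possible) == possible[1]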
|
|
|
|
|
2016-04-12 00:47:28 +02:00
|
|
|
func (c *Context) addDependency(module *moduleInfo, tag DependencyTag, depName string) []error {
|
2017-03-11 01:39:27 +01:00
|
|
|
if _, ok := tag.(BaseDependencyTag); ok {
|
|
|
|
panic("BaseDependencyTag is not allowed to be used directly!")
|
|
|
|
}
|
|
|
|
|
2016-05-17 23:58:05 +02:00
|
|
|
if depName == module.Name() {
|
2016-10-08 02:13:10 +02:00
|
|
|
return []error{&BlueprintError{
|
2014-12-19 01:28:54 +01:00
|
|
|
Err: fmt.Errorf("%q depends on itself", depName),
|
2015-11-04 02:33:29 +01:00
|
|
|
Pos: module.pos,
|
2014-12-19 01:28:54 +01:00
|
|
|
}}
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2017-11-11 00:12:08 +01:00
|
|
|
possibleDeps := c.modulesFromName(depName, module.namespace())
|
2016-05-17 23:58:05 +02:00
|
|
|
if possibleDeps == nil {
|
2017-11-11 00:12:08 +01:00
|
|
|
return c.discoveredMissingDependencies(module, depName)
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2016-05-17 23:58:05 +02:00
|
|
|
if m := c.findMatchingVariant(module, possibleDeps); m != nil {
|
2016-04-12 00:47:28 +02:00
|
|
|
for _, dep := range module.directDeps {
|
|
|
|
if m == dep.module {
|
|
|
|
// TODO(ccross): what if adding a dependency with a different tag?
|
|
|
|
return nil
|
|
|
|
}
|
2015-03-11 04:08:19 +01:00
|
|
|
}
|
2016-04-12 00:47:28 +02:00
|
|
|
module.directDeps = append(module.directDeps, depInfo{m, tag})
|
2016-08-11 20:09:00 +02:00
|
|
|
atomic.AddUint32(&c.depsModified, 1)
|
2015-03-11 04:08:19 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-20 22:11:38 +01:00
|
|
|
variants := make([]string, len(possibleDeps))
|
|
|
|
for i, mod := range possibleDeps {
|
|
|
|
variants[i] = c.prettyPrintVariant(mod.variant)
|
|
|
|
}
|
|
|
|
sort.Strings(variants)
|
|
|
|
|
2016-10-08 02:13:10 +02:00
|
|
|
return []error{&BlueprintError{
|
2017-03-20 22:11:38 +01:00
|
|
|
Err: fmt.Errorf("dependency %q of %q missing variant:\n %s\navailable variants:\n %s",
|
2016-05-17 23:58:05 +02:00
|
|
|
depName, module.Name(),
|
2017-03-20 22:11:38 +01:00
|
|
|
c.prettyPrintVariant(module.dependencyVariant),
|
|
|
|
strings.Join(variants, "\n ")),
|
2015-11-04 02:33:29 +01:00
|
|
|
Pos: module.pos,
|
2015-03-11 04:08:19 +01:00
|
|
|
}}
|
|
|
|
}
|
|
|
|
|
2015-11-04 01:41:29 +01:00
|
|
|
func (c *Context) findReverseDependency(module *moduleInfo, destName string) (*moduleInfo, []error) {
|
2016-05-17 23:58:05 +02:00
|
|
|
if destName == module.Name() {
|
2016-10-08 02:13:10 +02:00
|
|
|
return nil, []error{&BlueprintError{
|
2015-07-25 01:53:27 +02:00
|
|
|
Err: fmt.Errorf("%q depends on itself", destName),
|
|
|
|
Pos: module.pos,
|
|
|
|
}}
|
|
|
|
}
|
|
|
|
|
2017-11-11 00:12:08 +01:00
|
|
|
possibleDeps := c.modulesFromName(destName, module.namespace())
|
2016-05-17 23:58:05 +02:00
|
|
|
if possibleDeps == nil {
|
2016-10-08 02:13:10 +02:00
|
|
|
return nil, []error{&BlueprintError{
|
2015-07-25 01:53:27 +02:00
|
|
|
Err: fmt.Errorf("%q has a reverse dependency on undefined module %q",
|
2016-05-17 23:58:05 +02:00
|
|
|
module.Name(), destName),
|
2015-07-25 01:53:27 +02:00
|
|
|
Pos: module.pos,
|
|
|
|
}}
|
|
|
|
}
|
|
|
|
|
2016-05-17 23:58:05 +02:00
|
|
|
if m := c.findMatchingVariant(module, possibleDeps); m != nil {
|
2015-11-04 01:41:29 +01:00
|
|
|
return m, nil
|
2015-07-25 01:53:27 +02:00
|
|
|
}
|
|
|
|
|
2017-03-20 22:11:38 +01:00
|
|
|
variants := make([]string, len(possibleDeps))
|
|
|
|
for i, mod := range possibleDeps {
|
|
|
|
variants[i] = c.prettyPrintVariant(mod.variant)
|
|
|
|
}
|
|
|
|
sort.Strings(variants)
|
|
|
|
|
2016-10-08 02:13:10 +02:00
|
|
|
return nil, []error{&BlueprintError{
|
2017-03-20 22:11:38 +01:00
|
|
|
Err: fmt.Errorf("reverse dependency %q of %q missing variant:\n %s\navailable variants:\n %s",
|
2016-05-17 23:58:05 +02:00
|
|
|
destName, module.Name(),
|
2017-03-20 22:11:38 +01:00
|
|
|
c.prettyPrintVariant(module.dependencyVariant),
|
|
|
|
strings.Join(variants, "\n ")),
|
2015-07-25 01:53:27 +02:00
|
|
|
Pos: module.pos,
|
|
|
|
}}
|
|
|
|
}
|
|
|
|
|
2015-03-14 00:02:36 +01:00
|
|
|
func (c *Context) addVariationDependency(module *moduleInfo, variations []Variation,
|
2016-04-12 00:47:28 +02:00
|
|
|
tag DependencyTag, depName string, far bool) []error {
|
2017-03-11 01:39:27 +01:00
|
|
|
if _, ok := tag.(BaseDependencyTag); ok {
|
|
|
|
panic("BaseDependencyTag is not allowed to be used directly!")
|
|
|
|
}
|
2015-03-11 04:08:19 +01:00
|
|
|
|
2017-11-11 00:12:08 +01:00
|
|
|
possibleDeps := c.modulesFromName(depName, module.namespace())
|
2016-05-17 23:58:05 +02:00
|
|
|
if possibleDeps == nil {
|
2017-11-11 00:12:08 +01:00
|
|
|
return c.discoveredMissingDependencies(module, depName)
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2015-03-11 04:08:19 +01:00
|
|
|
// We can't just append variant.Variant to module.dependencyVariants.variantName and
|
|
|
|
// compare the strings because the result won't be in mutator registration order.
|
|
|
|
// Create a new map instead, and then deep compare the maps.
|
2015-05-08 20:14:54 +02:00
|
|
|
var newVariant variationMap
|
|
|
|
if !far {
|
|
|
|
newVariant = module.dependencyVariant.clone()
|
|
|
|
} else {
|
|
|
|
newVariant = make(variationMap)
|
|
|
|
}
|
2015-03-14 00:02:36 +01:00
|
|
|
for _, v := range variations {
|
|
|
|
newVariant[v.Mutator] = v.Variation
|
2015-03-11 04:08:19 +01:00
|
|
|
}
|
|
|
|
|
2016-05-17 23:58:05 +02:00
|
|
|
for _, m := range possibleDeps {
|
2015-05-08 20:14:54 +02:00
|
|
|
var found bool
|
|
|
|
if far {
|
|
|
|
found = m.variant.subset(newVariant)
|
|
|
|
} else {
|
|
|
|
found = m.variant.equal(newVariant)
|
|
|
|
}
|
|
|
|
if found {
|
2015-11-04 01:58:48 +01:00
|
|
|
if module == m {
|
2016-10-08 02:13:10 +02:00
|
|
|
return []error{&BlueprintError{
|
2015-11-04 01:58:48 +01:00
|
|
|
Err: fmt.Errorf("%q depends on itself", depName),
|
2015-11-04 02:33:29 +01:00
|
|
|
Pos: module.pos,
|
2015-11-04 01:58:48 +01:00
|
|
|
}}
|
|
|
|
}
|
2015-03-14 00:02:36 +01:00
|
|
|
// AddVariationDependency allows adding a dependency on itself, but only if
|
2015-03-11 04:08:19 +01:00
|
|
|
// that module is earlier in the module list than this one, since we always
|
2015-03-14 00:02:36 +01:00
|
|
|
// run GenerateBuildActions in order for the variants of a module
|
2016-05-17 23:58:05 +02:00
|
|
|
if m.group == module.group && beforeInModuleList(module, m, module.group.modules) {
|
2016-10-08 02:13:10 +02:00
|
|
|
return []error{&BlueprintError{
|
2015-03-11 04:08:19 +01:00
|
|
|
Err: fmt.Errorf("%q depends on later version of itself", depName),
|
2015-11-04 02:33:29 +01:00
|
|
|
Pos: module.pos,
|
2015-03-11 04:08:19 +01:00
|
|
|
}}
|
|
|
|
}
|
2016-04-12 00:47:28 +02:00
|
|
|
module.directDeps = append(module.directDeps, depInfo{m, tag})
|
2016-08-11 20:09:00 +02:00
|
|
|
atomic.AddUint32(&c.depsModified, 1)
|
2015-03-11 04:08:19 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2017-03-20 22:11:38 +01:00
|
|
|
variants := make([]string, len(possibleDeps))
|
|
|
|
for i, mod := range possibleDeps {
|
|
|
|
variants[i] = c.prettyPrintVariant(mod.variant)
|
|
|
|
}
|
|
|
|
sort.Strings(variants)
|
|
|
|
|
2016-10-08 02:13:10 +02:00
|
|
|
return []error{&BlueprintError{
|
2017-03-20 22:11:38 +01:00
|
|
|
Err: fmt.Errorf("dependency %q of %q missing variant:\n %s\navailable variants:\n %s",
|
2016-05-17 23:58:05 +02:00
|
|
|
depName, module.Name(),
|
2017-03-20 22:11:38 +01:00
|
|
|
c.prettyPrintVariant(newVariant),
|
|
|
|
strings.Join(variants, "\n ")),
|
2015-11-04 02:33:29 +01:00
|
|
|
Pos: module.pos,
|
2015-03-11 04:08:19 +01:00
|
|
|
}}
|
2014-05-28 01:34:41 +02:00
|
|
|
}
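// Illustrative sketch (not part of the original source): the far flag selects
// between exact-match and subset-match semantics when resolving the variation
// dependency added above:
//
//	// non-far: start from the depender's own variant and override it:
//	//   module.dependencyVariant = {"arch": "arm", "link": "static"}
//	//   variations               = [{Mutator: "link", Variation: "shared"}]
//	//   wanted                   = {"arch": "arm", "link": "shared"}  (must equal dep.variant)
//	//
//	// far: only the listed variations matter:
//	//   wanted                   = {"link": "shared"}  (must be a subset of dep.variant)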
|
|
|
|
|
2016-04-12 02:33:13 +02:00
|
|
|
func (c *Context) addInterVariantDependency(origModule *moduleInfo, tag DependencyTag,
|
|
|
|
from, to Module) {
|
2017-03-11 01:39:27 +01:00
|
|
|
if _, ok := tag.(BaseDependencyTag); ok {
|
|
|
|
panic("BaseDependencyTag is not allowed to be used directly!")
|
|
|
|
}
|
2016-04-12 02:33:13 +02:00
|
|
|
|
|
|
|
var fromInfo, toInfo *moduleInfo
|
|
|
|
for _, m := range origModule.splitModules {
|
|
|
|
if m.logicModule == from {
|
|
|
|
fromInfo = m
|
|
|
|
}
|
|
|
|
if m.logicModule == to {
|
|
|
|
toInfo = m
|
|
|
|
if fromInfo != nil {
|
2016-05-17 23:58:05 +02:00
|
|
|
panic(fmt.Errorf("%q depends on later version of itself", origModule.Name()))
|
2016-04-12 02:33:13 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if fromInfo == nil || toInfo == nil {
|
|
|
|
panic(fmt.Errorf("AddInterVariantDependency called for module %q on invalid variant",
|
2016-05-17 23:58:05 +02:00
|
|
|
origModule.Name()))
|
2016-04-12 02:33:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
fromInfo.directDeps = append(fromInfo.directDeps, depInfo{toInfo, tag})
|
2016-08-11 20:09:00 +02:00
|
|
|
atomic.AddUint32(&c.depsModified, 1)
|
2016-04-12 02:33:13 +02:00
|
|
|
}
|
|
|
|
|
2017-08-10 00:13:12 +02:00
|
|
|
// findBlueprintDescendants returns a map linking parent Blueprints files to child Blueprints files
|
|
|
|
// For example, if paths = []string{"a/b/c/Android.bp", "a/Blueprints"},
|
|
|
|
// then descendants = {"":[]string{"a/Blueprints"}, "a/Blueprints":[]string{"a/b/c/Android.bp"}}
|
|
|
|
func findBlueprintDescendants(paths []string) (descendants map[string][]string, err error) {
|
|
|
|
// make mapping from dir path to file path
|
|
|
|
filesByDir := make(map[string]string, len(paths))
|
|
|
|
for _, path := range paths {
|
|
|
|
dir := filepath.Dir(path)
|
|
|
|
_, alreadyFound := filesByDir[dir]
|
|
|
|
if alreadyFound {
|
|
|
|
return nil, fmt.Errorf("Found two Blueprint files in directory %v : %v and %v", dir, filesByDir[dir], path)
|
|
|
|
}
|
|
|
|
filesByDir[dir] = path
|
|
|
|
}
|
|
|
|
|
2017-11-30 03:37:31 +01:00
|
|
|
findAncestor := func(childFile string) (ancestor string) {
|
|
|
|
prevAncestorDir := filepath.Dir(childFile)
|
2017-08-10 00:13:12 +02:00
|
|
|
for {
|
|
|
|
ancestorDir := filepath.Dir(prevAncestorDir)
|
|
|
|
if ancestorDir == prevAncestorDir {
|
|
|
|
// reached the root dir without any matches; assign this as a descendant of ""
|
2017-11-30 03:37:31 +01:00
|
|
|
return ""
|
2017-08-10 00:13:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ancestorFile, ancestorExists := filesByDir[ancestorDir]
|
|
|
|
if ancestorExists {
|
2017-11-30 03:37:31 +01:00
|
|
|
return ancestorFile
|
2017-08-10 00:13:12 +02:00
|
|
|
}
|
|
|
|
prevAncestorDir = ancestorDir
|
|
|
|
}
|
|
|
|
}
|
2017-11-30 03:37:31 +01:00
|
|
|
// generate the descendants map
|
|
|
|
descendants = make(map[string][]string, len(filesByDir))
|
|
|
|
for _, childFile := range filesByDir {
|
|
|
|
ancestorFile := findAncestor(childFile)
|
|
|
|
descendants[ancestorFile] = append(descendants[ancestorFile], childFile)
|
|
|
|
}
|
2017-08-10 00:13:12 +02:00
|
|
|
return descendants, nil
|
|
|
|
}
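// Illustrative sketch (not part of the original source), restating the example
// from the comment above findBlueprintDescendants:
//
//	paths := []string{"a/b/c/Android.bp", "a/Blueprints"}
//	descendants, err := findBlueprintDescendants(paths)
//	// err == nil
//	// descendants[""]             == []string{"a/Blueprints"}
//	// descendants["a/Blueprints"] == []string{"a/b/c/Android.bp"}
//	_, _ = descendants, err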
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
type visitOrderer interface {
|
|
|
|
// returns the number of modules that this module needs to wait for
|
|
|
|
waitCount(module *moduleInfo) int
|
|
|
|
// returns the list of modules that are waiting for this module
|
|
|
|
propagate(module *moduleInfo) []*moduleInfo
|
|
|
|
// visit modules in order
|
|
|
|
visit(modules []*moduleInfo, visit func(*moduleInfo) bool)
|
|
|
|
}
|
|
|
|
|
|
|
|
type bottomUpVisitorImpl struct{}
|
|
|
|
|
|
|
|
func (bottomUpVisitorImpl) waitCount(module *moduleInfo) int {
|
|
|
|
return len(module.forwardDeps)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (bottomUpVisitorImpl) propagate(module *moduleInfo) []*moduleInfo {
|
|
|
|
return module.reverseDeps
|
|
|
|
}
|
|
|
|
|
|
|
|
func (bottomUpVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo) bool) {
|
|
|
|
for _, module := range modules {
|
|
|
|
if visit(module) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
type topDownVisitorImpl struct{}
|
|
|
|
|
|
|
|
func (topDownVisitorImpl) waitCount(module *moduleInfo) int {
|
|
|
|
return len(module.reverseDeps)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (topDownVisitorImpl) propagate(module *moduleInfo) []*moduleInfo {
|
|
|
|
return module.forwardDeps
|
|
|
|
}
|
|
|
|
|
|
|
|
func (topDownVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo) bool) {
|
|
|
|
for i := 0; i < len(modules); i++ {
|
|
|
|
module := modules[len(modules)-1-i]
|
2016-08-06 07:30:44 +02:00
|
|
|
if visit(module) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
var (
|
|
|
|
bottomUpVisitor bottomUpVisitorImpl
|
|
|
|
topDownVisitor topDownVisitorImpl
|
|
|
|
)
|
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
// Calls visit on each module, guaranteeing that visit is not called on a module until visit on all
|
|
|
|
// of its dependencies has finished.
|
2016-08-11 20:09:00 +02:00
|
|
|
func (c *Context) parallelVisit(order visitOrderer, visit func(group *moduleInfo) bool) {
|
2015-03-11 23:43:52 +01:00
|
|
|
doneCh := make(chan *moduleInfo)
|
2016-08-12 00:37:45 +02:00
|
|
|
cancelCh := make(chan bool)
|
2015-01-08 03:08:56 +01:00
|
|
|
count := 0
|
2015-03-02 23:03:01 +01:00
|
|
|
cancel := false
|
2015-01-08 03:08:56 +01:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
for _, module := range c.modulesSorted {
|
2016-08-11 20:09:00 +02:00
|
|
|
module.waitingCount = order.waitCount(module)
|
2015-01-08 03:08:56 +01:00
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
visitOne := func(module *moduleInfo) {
|
2015-01-08 03:08:56 +01:00
|
|
|
count++
|
|
|
|
go func() {
|
2015-03-11 23:43:52 +01:00
|
|
|
ret := visit(module)
|
2015-03-02 23:03:01 +01:00
|
|
|
if ret {
|
2016-08-12 00:37:45 +02:00
|
|
|
cancelCh <- true
|
2015-03-02 23:03:01 +01:00
|
|
|
}
|
2015-03-11 23:43:52 +01:00
|
|
|
doneCh <- module
|
2015-01-08 03:08:56 +01:00
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
for _, module := range c.modulesSorted {
|
|
|
|
if module.waitingCount == 0 {
|
|
|
|
visitOne(module)
|
2015-01-08 03:08:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-04 19:41:00 +01:00
|
|
|
for count > 0 {
|
2015-01-08 03:08:56 +01:00
|
|
|
select {
|
2016-08-12 00:37:45 +02:00
|
|
|
case cancel = <-cancelCh:
|
2015-03-11 23:43:52 +01:00
|
|
|
case doneModule := <-doneCh:
|
2015-03-02 23:03:01 +01:00
|
|
|
if !cancel {
|
2016-08-11 20:09:00 +02:00
|
|
|
for _, module := range order.propagate(doneModule) {
|
|
|
|
module.waitingCount--
|
|
|
|
if module.waitingCount == 0 {
|
|
|
|
visitOne(module)
|
2015-03-02 23:03:01 +01:00
|
|
|
}
|
2015-01-08 03:08:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
count--
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
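// Illustrative sketch (not part of the original source): parallelVisit treats
// the callback's return value as a cancellation signal. A caller that stops
// the walk at the first problematic module (hasProblem is hypothetical):
//
//	c.parallelVisit(bottomUpVisitor, func(m *moduleInfo) bool {
//		// visit runs on multiple goroutines; shared state needs its own locking
//		if hasProblem(m) {
//			return true // cancel: no further modules are scheduled
//		}
//		return false
//	})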
|
|
|
|
|
|
|
|
// updateDependencies recursively walks the module dependency graph and updates
|
|
|
|
// additional fields based on the dependencies. It builds a sorted list of modules
|
|
|
|
// such that dependencies of a module always appear first, and populates reverse
|
|
|
|
// dependency links and counts of total dependencies. It also reports errors when
|
|
|
|
// it encounters dependency cycles. This should be called after resolveDependencies,
|
|
|
|
// as well as after any mutator pass has called addDependency
|
|
|
|
func (c *Context) updateDependencies() (errs []error) {
|
2015-03-11 23:43:52 +01:00
|
|
|
visited := make(map[*moduleInfo]bool) // modules that were already checked
|
|
|
|
checking := make(map[*moduleInfo]bool) // modules actively being checked
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
sorted := make([]*moduleInfo, 0, len(c.moduleInfo))
|
2014-12-17 23:16:51 +01:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
var check func(group *moduleInfo) []*moduleInfo
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
cycleError := func(cycle []*moduleInfo) {
|
2015-03-11 22:40:30 +01:00
|
|
|
// We are the "start" of the cycle, so we're responsible
|
|
|
|
// for generating the errors. The cycle list is in
|
|
|
|
// reverse order because all the 'check' calls append
|
|
|
|
// their own module to the list.
|
2016-10-08 02:13:10 +02:00
|
|
|
errs = append(errs, &BlueprintError{
|
2015-03-11 22:40:30 +01:00
|
|
|
Err: fmt.Errorf("encountered dependency cycle:"),
|
2015-03-11 23:43:52 +01:00
|
|
|
Pos: cycle[len(cycle)-1].pos,
|
2015-03-11 22:40:30 +01:00
|
|
|
})
|
|
|
|
|
|
|
|
// Iterate backwards through the cycle list.
|
2015-03-25 00:42:56 +01:00
|
|
|
curModule := cycle[0]
|
2015-03-11 22:40:30 +01:00
|
|
|
for i := len(cycle) - 1; i >= 0; i-- {
|
2015-03-11 23:43:52 +01:00
|
|
|
nextModule := cycle[i]
|
2016-10-08 02:13:10 +02:00
|
|
|
errs = append(errs, &BlueprintError{
|
2015-03-11 22:40:30 +01:00
|
|
|
Err: fmt.Errorf(" %q depends on %q",
|
2016-05-17 23:58:05 +02:00
|
|
|
curModule.Name(),
|
|
|
|
nextModule.Name()),
|
2015-11-04 02:33:29 +01:00
|
|
|
Pos: curModule.pos,
|
2015-03-11 22:40:30 +01:00
|
|
|
})
|
2015-03-11 23:43:52 +01:00
|
|
|
curModule = nextModule
|
2015-03-11 22:40:30 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
check = func(module *moduleInfo) []*moduleInfo {
|
|
|
|
visited[module] = true
|
|
|
|
checking[module] = true
|
|
|
|
defer delete(checking, module)
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
deps := make(map[*moduleInfo]bool)
|
|
|
|
|
|
|
|
// Add an implicit dependency ordering on all earlier modules in the same module group
|
|
|
|
for _, dep := range module.group.modules {
|
|
|
|
if dep == module {
|
|
|
|
break
|
2014-12-18 01:12:41 +01:00
|
|
|
}
|
2015-03-11 23:43:52 +01:00
|
|
|
deps[dep] = true
|
2014-12-18 01:12:41 +01:00
|
|
|
}
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
for _, dep := range module.directDeps {
|
2016-04-12 00:47:28 +02:00
|
|
|
deps[dep.module] = true
|
2015-03-11 23:43:52 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
module.reverseDeps = []*moduleInfo{}
|
2016-08-11 20:09:00 +02:00
|
|
|
module.forwardDeps = []*moduleInfo{}
|
2015-01-08 03:08:56 +01:00
|
|
|
|
2014-12-18 01:12:41 +01:00
|
|
|
for dep := range deps {
|
2014-05-28 01:34:41 +02:00
|
|
|
if checking[dep] {
|
|
|
|
// This is a cycle.
|
2015-03-11 23:43:52 +01:00
|
|
|
return []*moduleInfo{dep, module}
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if !visited[dep] {
|
|
|
|
cycle := check(dep)
|
|
|
|
if cycle != nil {
|
2015-03-11 23:43:52 +01:00
|
|
|
if cycle[0] == module {
|
2014-05-28 01:34:41 +02:00
|
|
|
// We are the "start" of the cycle, so we're responsible
|
|
|
|
// for generating the errors. The cycle list is in
|
|
|
|
// reverse order because all the 'check' calls append
|
|
|
|
// their own module to the list.
|
2015-03-11 22:40:30 +01:00
|
|
|
cycleError(cycle)
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
// We can continue processing this module's children to
|
|
|
|
// find more cycles. Since all the modules that were
|
|
|
|
// part of the found cycle were marked as visited we
|
|
|
|
// won't run into that cycle again.
|
|
|
|
} else {
|
|
|
|
// We're not the "start" of the cycle, so we just append
|
|
|
|
// our module to the list and return it.
|
2015-03-11 23:43:52 +01:00
|
|
|
return append(cycle, module)
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-01-08 03:08:56 +01:00
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
module.forwardDeps = append(module.forwardDeps, dep)
|
2015-03-11 23:43:52 +01:00
|
|
|
dep.reverseDeps = append(dep.reverseDeps, module)
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
sorted = append(sorted, module)
|
2014-12-17 23:16:51 +01:00
|
|
|
|
2014-05-28 01:34:41 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
for _, module := range c.moduleInfo {
|
|
|
|
if !visited[module] {
|
|
|
|
cycle := check(module)
|
2014-05-28 01:34:41 +02:00
|
|
|
if cycle != nil {
|
2015-03-11 23:43:52 +01:00
|
|
|
if cycle[len(cycle)-1] != module {
|
2015-03-11 22:40:30 +01:00
|
|
|
panic("inconceivable!")
|
|
|
|
}
|
|
|
|
cycleError(cycle)
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
c.modulesSorted = sorted
|
2014-12-17 23:16:51 +01:00
|
|
|
|
2014-05-28 01:34:41 +02:00
|
|
|
return
|
|
|
|
}
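// Illustrative sketch (not part of the original source): a cycle a -> b -> a
// detected by check above is reported by cycleError roughly as:
//
//	// encountered dependency cycle:
//	//     "a" depends on "b"
//	//     "b" depends on "a"
//
// with each error positioned at the corresponding module definition.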
|
|
|
|
|
2014-06-13 05:06:50 +02:00
|
|
|
// PrepareBuildActions generates an internal representation of all the build
|
|
|
|
// actions that need to be performed. This process involves invoking the
|
|
|
|
// GenerateBuildActions method on each of the Module objects created during the
|
|
|
|
// parse phase and then on each of the registered Singleton objects.
|
|
|
|
//
|
|
|
|
// If the ResolveDependencies method has not already been called it is called
|
|
|
|
// automatically by this method.
|
|
|
|
//
|
|
|
|
// The config argument is made available to all of the Module and Singleton
|
|
|
|
// objects via the Config method on the ModuleContext and SingletonContext
|
|
|
|
// objects passed to GenerateBuildActions. It is also passed to the functions
|
|
|
|
// specified via PoolFunc, RuleFunc, and VariableFunc so that they can compute
|
|
|
|
// config-specific values.
|
2014-06-26 02:21:54 +02:00
|
|
|
//
|
|
|
|
// The returned deps is a list of the ninja files dependencies that were added
|
2015-12-19 00:18:03 +01:00
|
|
|
// by the modules and singletons via the ModuleContext.AddNinjaFileDeps(),
|
|
|
|
// SingletonContext.AddNinjaFileDeps(), and PackageContext.AddNinjaFileDeps()
|
|
|
|
// methods.
|
2014-06-26 02:21:54 +02:00
|
|
|
func (c *Context) PrepareBuildActions(config interface{}) (deps []string, errs []error) {
|
2014-05-28 01:34:41 +02:00
|
|
|
c.buildActionsReady = false
|
|
|
|
|
|
|
|
if !c.dependenciesReady {
|
2017-08-01 02:26:06 +02:00
|
|
|
extraDeps, errs := c.ResolveDependencies(config)
|
2014-05-28 01:34:41 +02:00
|
|
|
if len(errs) > 0 {
|
2014-06-26 02:21:54 +02:00
|
|
|
return nil, errs
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
2017-08-01 02:26:06 +02:00
|
|
|
deps = append(deps, extraDeps...)
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2017-11-07 22:29:54 +01:00
|
|
|
depsModules, errs := c.generateModuleBuildActions(config, c.liveGlobals)
|
2014-05-28 01:34:41 +02:00
|
|
|
if len(errs) > 0 {
|
2014-06-26 02:21:54 +02:00
|
|
|
return nil, errs
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2017-11-07 22:29:54 +01:00
|
|
|
depsSingletons, errs := c.generateSingletonBuildActions(config, c.singletonInfo, c.liveGlobals)
|
2014-05-28 01:34:41 +02:00
|
|
|
if len(errs) > 0 {
|
2014-06-26 02:21:54 +02:00
|
|
|
return nil, errs
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2017-08-01 02:26:06 +02:00
|
|
|
deps = append(deps, depsModules...)
|
|
|
|
deps = append(deps, depsSingletons...)
|
2014-06-26 02:21:54 +02:00
|
|
|
|
2015-11-19 01:01:01 +01:00
|
|
|
if c.ninjaBuildDir != nil {
|
2017-11-07 22:29:54 +01:00
|
|
|
c.liveGlobals.addNinjaStringDeps(c.ninjaBuildDir)
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2017-11-07 22:29:54 +01:00
|
|
|
pkgNames, depsPackages := c.makeUniquePackageNames(c.liveGlobals)
|
2015-12-19 00:18:03 +01:00
|
|
|
|
|
|
|
deps = append(deps, depsPackages...)
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
// This will panic if it finds a problem since it's a programming error.
|
2017-11-07 22:29:54 +01:00
|
|
|
c.checkForVariableReferenceCycles(c.liveGlobals.variables, pkgNames)
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
c.pkgNames = pkgNames
|
2017-11-07 22:29:54 +01:00
|
|
|
c.globalVariables = c.liveGlobals.variables
|
|
|
|
c.globalPools = c.liveGlobals.pools
|
|
|
|
c.globalRules = c.liveGlobals.rules
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
c.buildActionsReady = true
|
|
|
|
|
2014-06-26 02:21:54 +02:00
|
|
|
return deps, nil
|
2014-05-28 01:34:41 +02:00
|
|
|
}
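// Illustrative sketch (not part of the original source): typical handling of
// the deps returned above, assuming the caller regenerates the ninja file:
//
//	deps, errs := ctx.PrepareBuildActions(config)
//	if len(errs) > 0 {
//		// report errs and stop
//	}
//	// deps lists the files the generated build.ninja depends on (Blueprints
//	// files, mutator inputs, ...); callers typically emit them as implicit
//	// inputs of the rule that regenerates the ninja file.
//	_ = deps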
|
|
|
|
|
2017-08-01 02:26:06 +02:00
|
|
|
func (c *Context) runMutators(config interface{}) (deps []string, errs []error) {
|
2016-08-10 21:56:40 +02:00
|
|
|
var mutators []*mutatorInfo
|
|
|
|
|
|
|
|
mutators = append(mutators, c.earlyMutatorInfo...)
|
|
|
|
mutators = append(mutators, c.mutatorInfo...)
|
2015-10-29 23:32:56 +01:00
|
|
|
|
2016-08-10 21:56:40 +02:00
|
|
|
for _, mutator := range mutators {
|
2017-08-01 02:26:06 +02:00
|
|
|
var newDeps []string
|
2014-12-19 01:28:54 +01:00
|
|
|
if mutator.topDownMutator != nil {
|
2017-08-01 02:26:06 +02:00
|
|
|
newDeps, errs = c.runMutator(config, mutator, topDownMutator)
|
2014-12-19 01:28:54 +01:00
|
|
|
} else if mutator.bottomUpMutator != nil {
|
2017-08-01 02:26:06 +02:00
|
|
|
newDeps, errs = c.runMutator(config, mutator, bottomUpMutator)
|
2014-12-19 01:28:54 +01:00
|
|
|
} else {
|
|
|
|
panic("no mutator set on " + mutator.name)
|
|
|
|
}
|
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
2017-08-01 02:26:06 +02:00
|
|
|
deps = append(deps, newDeps...)
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2017-08-01 02:26:06 +02:00
|
|
|
return deps, nil
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
type mutatorDirection interface {
|
|
|
|
run(mutator *mutatorInfo, ctx *mutatorContext)
|
|
|
|
orderer() visitOrderer
|
|
|
|
fmt.Stringer
|
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
type bottomUpMutatorImpl struct{}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
func (bottomUpMutatorImpl) run(mutator *mutatorInfo, ctx *mutatorContext) {
|
|
|
|
mutator.bottomUpMutator(ctx)
|
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
func (bottomUpMutatorImpl) orderer() visitOrderer {
|
|
|
|
return bottomUpVisitor
|
|
|
|
}
|
|
|
|
|
|
|
|
func (bottomUpMutatorImpl) String() string {
|
|
|
|
return "bottom up mutator"
|
|
|
|
}
|
|
|
|
|
|
|
|
type topDownMutatorImpl struct{}
|
|
|
|
|
|
|
|
func (topDownMutatorImpl) run(mutator *mutatorInfo, ctx *mutatorContext) {
|
|
|
|
mutator.topDownMutator(ctx)
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
func (topDownMutatorImpl) orderer() visitOrderer {
|
|
|
|
return topDownVisitor
|
|
|
|
}
|
|
|
|
|
|
|
|
func (topDownMutatorImpl) String() string {
|
|
|
|
return "top down mutator"
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
topDownMutator topDownMutatorImpl
|
|
|
|
bottomUpMutator bottomUpMutatorImpl
|
|
|
|
)
|
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
type reverseDep struct {
|
|
|
|
module *moduleInfo
|
|
|
|
dep depInfo
|
|
|
|
}
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
func (c *Context) runMutator(config interface{}, mutator *mutatorInfo,
|
2017-08-01 02:26:06 +02:00
|
|
|
direction mutatorDirection) (deps []string, errs []error) {
|
2016-08-06 07:30:44 +02:00
|
|
|
|
|
|
|
newModuleInfo := make(map[Module]*moduleInfo)
|
|
|
|
for k, v := range c.moduleInfo {
|
|
|
|
newModuleInfo[k] = v
|
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-12-09 19:29:05 +01:00
|
|
|
type globalStateChange struct {
|
2017-07-28 23:32:36 +02:00
|
|
|
reverse []reverseDep
|
|
|
|
rename []rename
|
|
|
|
replace []replace
|
|
|
|
newModules []*moduleInfo
|
2017-08-01 02:26:06 +02:00
|
|
|
deps []string
|
2016-12-09 19:29:05 +01:00
|
|
|
}
|
|
|
|
|
2016-04-12 00:47:28 +02:00
|
|
|
reverseDeps := make(map[*moduleInfo][]depInfo)
|
2016-12-09 19:29:05 +01:00
|
|
|
var rename []rename
|
|
|
|
var replace []replace
|
2017-07-28 23:32:36 +02:00
|
|
|
var newModules []*moduleInfo
|
2015-11-04 01:41:29 +01:00
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
errsCh := make(chan []error)
|
2016-12-09 19:29:05 +01:00
|
|
|
globalStateCh := make(chan globalStateChange)
|
2017-07-29 00:22:46 +02:00
|
|
|
newVariationsCh := make(chan []*moduleInfo)
|
2016-08-06 07:30:44 +02:00
|
|
|
done := make(chan bool)
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
c.depsModified = 0
|
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
visit := func(module *moduleInfo) bool {
|
2015-04-15 05:28:10 +02:00
|
|
|
if module.splitModules != nil {
|
|
|
|
panic("split module found in sorted module list")
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
mctx := &mutatorContext{
|
|
|
|
baseModuleContext: baseModuleContext{
|
|
|
|
context: c,
|
|
|
|
config: config,
|
|
|
|
module: module,
|
|
|
|
},
|
2016-08-06 07:30:44 +02:00
|
|
|
name: mutator.name,
|
2015-03-11 23:43:52 +01:00
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-01-07 22:43:09 +01:00
|
|
|
func() {
|
|
|
|
defer func() {
|
|
|
|
if r := recover(); r != nil {
|
2016-08-11 20:09:00 +02:00
|
|
|
in := fmt.Sprintf("%s %q for %s", direction, mutator.name, module)
|
2016-01-07 22:43:09 +01:00
|
|
|
if err, ok := r.(panicError); ok {
|
|
|
|
err.addIn(in)
|
|
|
|
mctx.error(err)
|
|
|
|
} else {
|
|
|
|
mctx.error(newPanicErrorf(r, in))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-08-11 20:09:00 +02:00
|
|
|
direction.run(mutator, mctx)
|
2016-01-07 22:43:09 +01:00
|
|
|
}()
|
2016-08-06 07:30:44 +02:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
if len(mctx.errs) > 0 {
|
2016-08-12 00:37:45 +02:00
|
|
|
errsCh <- mctx.errs
|
2016-08-06 07:30:44 +02:00
|
|
|
return true
|
2015-03-11 23:43:52 +01:00
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2017-07-29 00:22:46 +02:00
|
|
|
if len(mctx.newVariations) > 0 {
|
|
|
|
newVariationsCh <- mctx.newVariations
|
2015-03-11 23:43:52 +01:00
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2017-07-28 23:32:36 +02:00
|
|
|
if len(mctx.reverseDeps) > 0 || len(mctx.replace) > 0 || len(mctx.rename) > 0 || len(mctx.newModules) > 0 {
|
2016-12-09 19:29:05 +01:00
|
|
|
globalStateCh <- globalStateChange{
|
2017-07-28 23:32:36 +02:00
|
|
|
reverse: mctx.reverseDeps,
|
|
|
|
replace: mctx.replace,
|
|
|
|
rename: mctx.rename,
|
|
|
|
newModules: mctx.newModules,
|
2017-08-01 02:26:06 +02:00
|
|
|
deps: mctx.ninjaFileDeps,
|
2016-12-09 19:29:05 +01:00
|
|
|
}
|
2016-08-06 07:30:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process errs and reverseDeps in a single goroutine
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case newErrs := <-errsCh:
|
|
|
|
errs = append(errs, newErrs...)
|
2016-12-09 19:29:05 +01:00
|
|
|
case globalStateChange := <-globalStateCh:
|
|
|
|
for _, r := range globalStateChange.reverse {
|
2016-08-06 07:30:44 +02:00
|
|
|
reverseDeps[r.module] = append(reverseDeps[r.module], r.dep)
|
|
|
|
}
|
2016-12-09 19:29:05 +01:00
|
|
|
replace = append(replace, globalStateChange.replace...)
|
|
|
|
rename = append(rename, globalStateChange.rename...)
|
2017-07-28 23:32:36 +02:00
|
|
|
newModules = append(newModules, globalStateChange.newModules...)
|
2017-08-01 02:26:06 +02:00
|
|
|
deps = append(deps, globalStateChange.deps...)
|
2017-07-29 00:22:46 +02:00
|
|
|
case newVariations := <-newVariationsCh:
|
|
|
|
for _, m := range newVariations {
|
2016-08-06 07:30:44 +02:00
|
|
|
newModuleInfo[m.logicModule] = m
|
|
|
|
}
|
|
|
|
case <-done:
|
|
|
|
return
|
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
2016-08-06 07:30:44 +02:00
|
|
|
}()
|
2014-12-19 01:28:54 +01:00
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
if mutator.parallel {
|
2016-08-11 20:09:00 +02:00
|
|
|
c.parallelVisit(direction.orderer(), visit)
|
2016-08-06 07:30:44 +02:00
|
|
|
} else {
|
2016-08-11 20:09:00 +02:00
|
|
|
direction.orderer().visit(c.modulesSorted, visit)
|
2016-08-06 07:30:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
done <- true
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2016-08-06 07:30:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
c.moduleInfo = newModuleInfo
|
|
|
|
|
|
|
|
for _, group := range c.moduleGroups {
|
|
|
|
for i := 0; i < len(group.modules); i++ {
|
|
|
|
module := group.modules[i]
|
|
|
|
|
|
|
|
// Update module group to contain newly split variants
|
|
|
|
if module.splitModules != nil {
|
|
|
|
group.modules, i = spliceModules(group.modules, i, module.splitModules)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fix up any remaining dependencies on modules that were split into variants
|
|
|
|
// by replacing them with the first variant
|
|
|
|
for j, dep := range module.directDeps {
|
|
|
|
if dep.module.logicModule == nil {
|
|
|
|
module.directDeps[j].module = dep.module.splitModules[0]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2015-11-04 01:41:29 +01:00
|
|
|
for module, deps := range reverseDeps {
|
2016-04-12 00:47:28 +02:00
|
|
|
sort.Sort(depSorter(deps))
|
2015-11-04 01:41:29 +01:00
|
|
|
module.directDeps = append(module.directDeps, deps...)
|
2016-08-11 20:09:00 +02:00
|
|
|
c.depsModified++
|
2015-11-04 01:41:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-28 23:32:36 +02:00
|
|
|
for _, module := range newModules {
|
|
|
|
errs = c.addModule(module)
|
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2017-07-28 23:32:36 +02:00
|
|
|
}
|
|
|
|
atomic.AddUint32(&c.depsModified, 1)
|
|
|
|
}
|
|
|
|
|
2016-12-09 19:29:05 +01:00
|
|
|
errs = c.handleRenames(rename)
|
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2016-12-09 19:29:05 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
errs = c.handleReplacements(replace)
|
2016-10-12 19:45:05 +02:00
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2016-10-12 19:45:05 +02:00
|
|
|
}
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
if c.depsModified > 0 {
|
|
|
|
errs = c.updateDependencies()
|
|
|
|
if len(errs) > 0 {
|
2017-08-01 02:26:06 +02:00
|
|
|
return nil, errs
|
2016-08-11 20:09:00 +02:00
|
|
|
}
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2017-08-01 02:26:06 +02:00
|
|
|
return deps, errs
|
2014-12-19 01:28:54 +01:00
|
|
|
}
|
|
|
|
|
2016-04-12 00:41:52 +02:00
|
|
|
// Replaces every build logic module with a clone of itself. Prevents introducing problems where
|
|
|
|
// a mutator sets a non-property member variable on a module, which works until a later mutator
|
|
|
|
// creates variants of that module.
|
|
|
|
func (c *Context) cloneModules() {
|
2016-08-09 23:21:02 +02:00
|
|
|
type update struct {
|
|
|
|
orig Module
|
|
|
|
clone *moduleInfo
|
|
|
|
}
|
|
|
|
ch := make(chan update, 100)
|
|
|
|
|
2016-04-12 00:41:52 +02:00
|
|
|
for _, m := range c.modulesSorted {
|
2016-08-09 23:21:02 +02:00
|
|
|
go func(m *moduleInfo) {
|
|
|
|
origLogicModule := m.logicModule
|
2017-07-28 23:31:03 +02:00
|
|
|
m.logicModule, m.properties = c.cloneLogicModule(m)
|
2016-08-09 23:21:02 +02:00
|
|
|
ch <- update{origLogicModule, m}
|
|
|
|
}(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 0; i < len(c.modulesSorted); i++ {
|
|
|
|
update := <-ch
|
|
|
|
delete(c.moduleInfo, update.orig)
|
|
|
|
c.moduleInfo[update.clone.logicModule] = update.clone
|
2016-04-12 00:41:52 +02:00
|
|
|
}
|
|
|
|
}
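// Illustrative sketch (not part of the original source): the failure mode the
// clone above makes deterministic. A hypothetical module type that stashes
// state outside its property structs loses that state whenever its logic
// module is cloned, so relying on it only "works" until a variant is created:
//
//	type barModule struct {
//		properties struct{ Srcs []string } // property struct: copied on clone
//		scratch    []string                // plain field: reset on clone
//	}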
|
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
// Removes modules[i] from the list and inserts newModules... where it was located, returning
|
|
|
|
// the new slice and the index of the last inserted element
|
|
|
|
func spliceModules(modules []*moduleInfo, i int, newModules []*moduleInfo) ([]*moduleInfo, int) {
|
2015-03-11 23:43:52 +01:00
|
|
|
spliceSize := len(newModules)
|
|
|
|
newLen := len(modules) + spliceSize - 1
|
|
|
|
var dest []*moduleInfo
|
|
|
|
if cap(modules) >= len(modules)-1+len(newModules) {
|
|
|
|
// We can fit the splice in the existing capacity, do everything in place
|
|
|
|
dest = modules[:newLen]
|
|
|
|
} else {
|
|
|
|
dest = make([]*moduleInfo, newLen)
|
|
|
|
copy(dest, modules[:i])
|
|
|
|
}
|
|
|
|
|
|
|
|
// Move the end of the slice over by spliceSize-1
|
2015-03-16 08:13:59 +01:00
|
|
|
copy(dest[i+spliceSize:], modules[i+1:])
|
2015-03-11 23:43:52 +01:00
|
|
|
|
|
|
|
// Copy the new modules into the slice
|
2015-03-16 08:13:59 +01:00
|
|
|
copy(dest[i:], newModules)
|
2015-03-11 23:43:52 +01:00
|
|
|
|
2016-08-06 07:30:44 +02:00
|
|
|
return dest, i + spliceSize - 1
|
2015-03-11 23:43:52 +01:00
|
|
|
}
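// Illustrative sketch (not part of the original source): spliceModules replaces
// element i with the new variants, in place when capacity allows:
//
//	// modules    = [a, b, c], i = 1, newModules = [b1, b2]
//	// result     = [a, b1, b2, c]
//	// new index  = 2 (b2), so the caller's loop continues after the splice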
|
|
|
|
|
2014-06-12 03:31:16 +02:00
|
|
|
func (c *Context) generateModuleBuildActions(config interface{},
|
2014-06-26 02:21:54 +02:00
|
|
|
liveGlobals *liveTracker) ([]string, []error) {
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2014-06-26 02:21:54 +02:00
|
|
|
var deps []string
|
2014-05-28 01:34:41 +02:00
|
|
|
var errs []error
|
|
|
|
|
2015-01-08 03:08:56 +01:00
|
|
|
cancelCh := make(chan struct{})
|
|
|
|
errsCh := make(chan []error)
|
|
|
|
depsCh := make(chan []string)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-cancelCh:
|
|
|
|
close(cancelCh)
|
|
|
|
return
|
|
|
|
case newErrs := <-errsCh:
|
|
|
|
errs = append(errs, newErrs...)
|
|
|
|
case newDeps := <-depsCh:
|
|
|
|
deps = append(deps, newDeps...)
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2016-08-11 20:09:00 +02:00
|
|
|
c.parallelVisit(bottomUpVisitor, func(module *moduleInfo) bool {
|
2017-12-02 02:10:52 +01:00
|
|
|
|
|
|
|
uniqueName := c.nameInterface.UniqueName(newNamespaceContext(module), module.group.name)
|
|
|
|
sanitizedName := toNinjaName(uniqueName)
|
|
|
|
|
|
|
|
prefix := moduleNamespacePrefix(sanitizedName + "_" + module.variantName)
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
// The parent scope of the moduleContext's local scope gets overridden to be that of the
|
|
|
|
// calling Go package on a per-call basis. Since the initial parent scope doesn't matter we
|
|
|
|
// just set it to nil.
|
|
|
|
scope := newLocalScope(nil, prefix)
|
|
|
|
|
|
|
|
mctx := &moduleContext{
|
|
|
|
baseModuleContext: baseModuleContext{
|
|
|
|
context: c,
|
|
|
|
config: config,
|
|
|
|
module: module,
|
|
|
|
},
|
2015-12-18 00:49:30 +01:00
|
|
|
scope: scope,
|
|
|
|
handledMissingDeps: module.missingDeps == nil,
|
2015-03-11 23:43:52 +01:00
|
|
|
}
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2016-01-07 22:43:09 +01:00
|
|
|
func() {
|
|
|
|
defer func() {
|
|
|
|
if r := recover(); r != nil {
|
|
|
|
in := fmt.Sprintf("GenerateBuildActions for %s", module)
|
|
|
|
if err, ok := r.(panicError); ok {
|
|
|
|
err.addIn(in)
|
|
|
|
mctx.error(err)
|
|
|
|
} else {
|
|
|
|
mctx.error(newPanicErrorf(r, in))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
mctx.module.logicModule.GenerateBuildActions(mctx)
|
|
|
|
}()
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
if len(mctx.errs) > 0 {
|
|
|
|
errsCh <- mctx.errs
|
|
|
|
return true
|
|
|
|
}
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2015-12-18 00:49:30 +01:00
|
|
|
if module.missingDeps != nil && !mctx.handledMissingDeps {
|
|
|
|
var errs []error
|
|
|
|
for _, depName := range module.missingDeps {
|
2017-11-11 00:12:08 +01:00
|
|
|
errs = append(errs, c.missingDependencyError(module, depName))
|
2015-12-18 00:49:30 +01:00
|
|
|
}
|
|
|
|
errsCh <- errs
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:43:52 +01:00
|
|
|
depsCh <- mctx.ninjaFileDeps
|
2014-06-26 02:21:54 +02:00
|
|
|
|
2015-03-12 00:17:52 +01:00
|
|
|
newErrs := c.processLocalBuildActions(&module.actionDefs,
|
2015-03-11 23:43:52 +01:00
|
|
|
&mctx.actionDefs, liveGlobals)
|
|
|
|
if len(newErrs) > 0 {
|
|
|
|
errsCh <- newErrs
|
|
|
|
return true
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
2015-03-02 23:03:01 +01:00
|
|
|
return false
|
2015-01-08 03:08:56 +01:00
|
|
|
})
|
|
|
|
|
|
|
|
cancelCh <- struct{}{}
|
|
|
|
<-cancelCh
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2014-06-26 02:21:54 +02:00
|
|
|
return deps, errs
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2014-06-12 03:31:16 +02:00
|
|
|
func (c *Context) generateSingletonBuildActions(config interface{},
|
2017-11-07 22:29:54 +01:00
|
|
|
singletons []*singletonInfo, liveGlobals *liveTracker) ([]string, []error) {
|
2014-05-28 01:34:41 +02:00
|
|
|
|
2014-06-26 02:21:54 +02:00
|
|
|
var deps []string
|
2014-05-28 01:34:41 +02:00
|
|
|
var errs []error
|
2014-06-26 02:21:54 +02:00
|
|
|
|
2017-11-07 22:29:54 +01:00
|
|
|
for _, info := range singletons {
|
2014-09-25 02:51:52 +02:00
|
|
|
// The parent scope of the singletonContext's local scope gets overridden to be that of the
|
|
|
|
// calling Go package on a per-call basis. Since the initial parent scope doesn't matter we
|
|
|
|
// just set it to nil.
|
2015-08-26 02:58:17 +02:00
|
|
|
scope := newLocalScope(nil, singletonNamespacePrefix(info.name))
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
sctx := &singletonContext{
|
|
|
|
context: c,
|
|
|
|
config: config,
|
2014-09-25 02:51:52 +02:00
|
|
|
scope: scope,
|
2016-01-15 00:42:54 +01:00
|
|
|
globals: liveGlobals,
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
2016-01-07 22:43:09 +01:00
|
|
|
func() {
|
|
|
|
defer func() {
|
|
|
|
if r := recover(); r != nil {
|
|
|
|
in := fmt.Sprintf("GenerateBuildActions for singleton %s", info.name)
|
|
|
|
if err, ok := r.(panicError); ok {
|
|
|
|
err.addIn(in)
|
|
|
|
sctx.error(err)
|
|
|
|
} else {
|
|
|
|
sctx.error(newPanicErrorf(r, in))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
info.singleton.GenerateBuildActions(sctx)
|
|
|
|
}()
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
if len(sctx.errs) > 0 {
|
|
|
|
errs = append(errs, sctx.errs...)
|
|
|
|
if len(errs) > maxErrors {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2014-06-26 02:21:54 +02:00
|
|
|
deps = append(deps, sctx.ninjaFileDeps...)
|
|
|
|
|
2014-05-28 01:34:41 +02:00
|
|
|
newErrs := c.processLocalBuildActions(&info.actionDefs,
|
|
|
|
&sctx.actionDefs, liveGlobals)
|
|
|
|
errs = append(errs, newErrs...)
|
|
|
|
if len(errs) > maxErrors {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-26 02:21:54 +02:00
|
|
|
return deps, errs
|
2014-05-28 01:34:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Context) processLocalBuildActions(out, in *localBuildActions,
|
|
|
|
liveGlobals *liveTracker) []error {
|
|
|
|
|
|
|
|
var errs []error
|
|
|
|
|
|
|
|
// First we go through and add everything referenced by the module's
|
|
|
|
// buildDefs to the live globals set. This will end up adding the live
|
|
|
|
// locals to the set as well, but we'll take them out after.
|
|
|
|
for _, def := range in.buildDefs {
|
|
|
|
err := liveGlobals.AddBuildDefDeps(def)
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return errs
|
|
|
|
}
|
|
|
|
|
2014-12-19 01:28:54 +01:00
|
|
|
out.buildDefs = append(out.buildDefs, in.buildDefs...)
|
2014-05-28 01:34:41 +02:00
|
|
|
|
|
|
|
// We use the now-incorrect set of live "globals" to determine which local
|
|
|
|
// definitions are live. As we go through copying those live locals to the
|
2014-12-19 01:28:54 +01:00
|
|
|
// moduleGroup we remove them from the live globals set.
|
2014-05-28 01:34:41 +02:00
|
|
|
for _, v := range in.variables {
|
2015-03-12 00:17:52 +01:00
|
|
|
isLive := liveGlobals.RemoveVariableIfLive(v)
|
2014-05-28 01:34:41 +02:00
|
|
|
if isLive {
|
|
|
|
out.variables = append(out.variables, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, r := range in.rules {
|
2015-03-12 00:17:52 +01:00
|
|
|
isLive := liveGlobals.RemoveRuleIfLive(r)
|
2014-05-28 01:34:41 +02:00
|
|
|
if isLive {
|
|
|
|
out.rules = append(out.rules, r)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-10-06 23:03:27 +02:00
|
|
|
func (c *Context) walkDeps(topModule *moduleInfo,
	visitDown func(depInfo, *moduleInfo) bool, visitUp func(depInfo, *moduleInfo)) {

	visited := make(map[*moduleInfo]bool)
	var visiting *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "WalkDeps(%s, %s, %s) for dependency %s",
				topModule, funcName(visitDown), funcName(visitUp), visiting))
		}
	}()

	var walk func(module *moduleInfo)
	walk = func(module *moduleInfo) {
		for _, dep := range module.directDeps {
			if !visited[dep.module] {
				visited[dep.module] = true
				visiting = dep.module
				recurse := true
				if visitDown != nil {
					recurse = visitDown(dep, module)
				}
				if recurse {
					walk(dep.module)
				}
				if visitUp != nil {
					visitUp(dep, module)
				}
			}
		}
	}

	walk(topModule)
}

type replace struct {
	from, to *moduleInfo
}

type rename struct {
	group *moduleGroup
	name  string
}

func (c *Context) moduleMatchingVariant(module *moduleInfo, name string) *moduleInfo {
	targets := c.modulesFromName(name, module.namespace())

	if targets == nil {
		return nil
	}

	for _, m := range targets {
		if module.variantName == m.variantName {
			return m
		}
	}

	return nil
}

func (c *Context) handleRenames(renames []rename) []error {
	var errs []error
	for _, rename := range renames {
		group, name := rename.group, rename.name
		if name == group.name || len(group.modules) < 1 {
			continue
		}

		errs = append(errs, c.nameInterface.Rename(group.name, rename.name, group.namespace)...)
	}

	return errs
}

func (c *Context) handleReplacements(replacements []replace) []error {
	var errs []error
	for _, replace := range replacements {
		for _, m := range replace.from.reverseDeps {
			for i, d := range m.directDeps {
				if d.module == replace.from {
					m.directDeps[i].module = replace.to
				}
			}
		}

		atomic.AddUint32(&c.depsModified, 1)
	}

	return errs
}

func (c *Context) discoveredMissingDependencies(module *moduleInfo, depName string) (errs []error) {
	if c.allowMissingDependencies {
		module.missingDeps = append(module.missingDeps, depName)
		return nil
	}
	return []error{c.missingDependencyError(module, depName)}
}

func (c *Context) missingDependencyError(module *moduleInfo, depName string) (errs error) {
	err := c.nameInterface.MissingDependencyError(module.Name(), module.namespace(), depName)

	return &BlueprintError{
		Err: err,
		Pos: module.pos,
	}
}

func (c *Context) modulesFromName(name string, namespace Namespace) []*moduleInfo {
	group, exists := c.nameInterface.ModuleFromName(name, namespace)
	if exists {
		return group.modules
	}
	return nil
}

func (c *Context) sortedModuleGroups() []*moduleGroup {
	if c.cachedSortedModuleGroups == nil {
		unwrap := func(wrappers []ModuleGroup) []*moduleGroup {
			result := make([]*moduleGroup, 0, len(wrappers))
			for _, group := range wrappers {
				result = append(result, group.moduleGroup)
			}
			return result
		}

		c.cachedSortedModuleGroups = unwrap(c.nameInterface.AllModules())
	}

	return c.cachedSortedModuleGroups
}

func (c *Context) visitAllModules(visit func(Module)) {
	var module *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitAllModules(%s) for %s",
				funcName(visit), module))
		}
	}()

	for _, moduleGroup := range c.sortedModuleGroups() {
		for _, module = range moduleGroup.modules {
			visit(module.logicModule)
		}
	}
}

func (c *Context) visitAllModulesIf(pred func(Module) bool,
	visit func(Module)) {

	var module *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitAllModulesIf(%s, %s) for %s",
				funcName(pred), funcName(visit), module))
		}
	}()

	for _, moduleGroup := range c.sortedModuleGroups() {
		for _, module := range moduleGroup.modules {
			if pred(module.logicModule) {
				visit(module.logicModule)
			}
		}
	}
}

func (c *Context) visitAllModuleVariants(module *moduleInfo,
	visit func(Module)) {

	var variant *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitAllModuleVariants(%s, %s) for %s",
				module, funcName(visit), variant))
		}
	}()

	for _, variant = range module.group.modules {
		visit(variant.logicModule)
	}
}

func (c *Context) requireNinjaVersion(major, minor, micro int) {
	if major != 1 {
		panic("ninja version with major version != 1 not supported")
	}
	if c.requiredNinjaMinor < minor {
		c.requiredNinjaMinor = minor
		c.requiredNinjaMicro = micro
	}
	if c.requiredNinjaMinor == minor && c.requiredNinjaMicro < micro {
		c.requiredNinjaMicro = micro
	}
}

func (c *Context) setNinjaBuildDir(value *ninjaString) {
	if c.ninjaBuildDir == nil {
		c.ninjaBuildDir = value
	}
}

func (c *Context) makeUniquePackageNames(
	liveGlobals *liveTracker) (map[*packageContext]string, []string) {

	pkgs := make(map[string]*packageContext)
	pkgNames := make(map[*packageContext]string)
	longPkgNames := make(map[*packageContext]bool)

	processPackage := func(pctx *packageContext) {
		if pctx == nil {
			// This is a built-in rule and has no package.
			return
		}
		if _, ok := pkgNames[pctx]; ok {
			// We've already processed this package.
			return
		}

		otherPkg, present := pkgs[pctx.shortName]
		if present {
			// Short name collision. Both this package and the one that's
			// already there need to use their full names. We leave the short
			// name in pkgNames for now so future collisions still get caught.
			longPkgNames[pctx] = true
			longPkgNames[otherPkg] = true
		} else {
			// No collision so far. Tentatively set the package's name to be
			// its short name.
			pkgNames[pctx] = pctx.shortName
			pkgs[pctx.shortName] = pctx
		}
	}

	// We try to give all packages their short name, but when we get collisions
	// we need to use the full unique package name.
	for v := range liveGlobals.variables {
		processPackage(v.packageContext())
	}
	for p := range liveGlobals.pools {
		processPackage(p.packageContext())
	}
	for r := range liveGlobals.rules {
		processPackage(r.packageContext())
	}

	// Add the packages that had collisions using their full unique names. This
	// will overwrite any short names that were added in the previous step.
	for pctx := range longPkgNames {
		pkgNames[pctx] = pctx.fullName
	}

	// Create deps list from calls to PackageContext.AddNinjaFileDeps
	deps := []string{}
	for _, pkg := range pkgs {
		deps = append(deps, pkg.ninjaFileDeps...)
	}

	return pkgNames, deps
}

func (c *Context) checkForVariableReferenceCycles(
	variables map[Variable]*ninjaString, pkgNames map[*packageContext]string) {

	visited := make(map[Variable]bool)  // variables that were already checked
	checking := make(map[Variable]bool) // variables actively being checked

	var check func(v Variable) []Variable

	check = func(v Variable) []Variable {
		visited[v] = true
		checking[v] = true
		defer delete(checking, v)

		value := variables[v]
		for _, dep := range value.variables {
			if checking[dep] {
				// This is a cycle.
				return []Variable{dep, v}
			}

			if !visited[dep] {
				cycle := check(dep)
				if cycle != nil {
					if cycle[0] == v {
						// We are the "start" of the cycle, so we're responsible
						// for generating the errors. The cycle list is in
						// reverse order because all the 'check' calls append
						// their own module to the list.
						msgs := []string{"detected variable reference cycle:"}

						// Iterate backwards through the cycle list.
						curName := v.fullName(pkgNames)
						curValue := value.Value(pkgNames)
						for i := len(cycle) - 1; i >= 0; i-- {
							next := cycle[i]
							nextName := next.fullName(pkgNames)
							nextValue := variables[next].Value(pkgNames)

							msgs = append(msgs, fmt.Sprintf(
								"    %q depends on %q", curName, nextName))
							msgs = append(msgs, fmt.Sprintf(
								"    [%s = %s]", curName, curValue))

							curName = nextName
							curValue = nextValue
						}

						// Variable reference cycles are a programming error,
						// not the fault of the Blueprint file authors.
						panic(strings.Join(msgs, "\n"))
					} else {
						// We're not the "start" of the cycle, so we just append
						// our module to the list and return it.
						return append(cycle, v)
					}
				}
			}
		}

		return nil
	}

	for v := range variables {
		if !visited[v] {
			cycle := check(v)
			if cycle != nil {
				panic("inconceivable!")
			}
		}
	}
}

// AllTargets returns a map of all the build target names to the rule used to
// build them. This is the same information that is output by running
// 'ninja -t targets all'. If this is called before PrepareBuildActions
// successfully completes then ErrBuildActionsNotReady is returned.
func (c *Context) AllTargets() (map[string]string, error) {
	if !c.buildActionsReady {
		return nil, ErrBuildActionsNotReady
	}

	targets := map[string]string{}

	// Collect all the module build targets.
	for _, module := range c.moduleInfo {
		for _, buildDef := range module.actionDefs.buildDefs {
			ruleName := buildDef.Rule.fullName(c.pkgNames)
			for _, output := range append(buildDef.Outputs, buildDef.ImplicitOutputs...) {
				outputValue, err := output.Eval(c.globalVariables)
				if err != nil {
					return nil, err
				}
				targets[outputValue] = ruleName
			}
		}
	}

	// Collect all the singleton build targets.
	for _, info := range c.singletonInfo {
		for _, buildDef := range info.actionDefs.buildDefs {
			ruleName := buildDef.Rule.fullName(c.pkgNames)
			for _, output := range append(buildDef.Outputs, buildDef.ImplicitOutputs...) {
				outputValue, err := output.Eval(c.globalVariables)
				if err != nil {
					return nil, err
				}
				targets[outputValue] = ruleName
			}
		}
	}

	return targets, nil
}

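// NinjaBuildDir returns the value that will be assigned to the top-level
// "builddir" variable in the generated Ninja file, evaluated against the
// global variables, or the empty string if no build directory has been set.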
func (c *Context) NinjaBuildDir() (string, error) {
	if c.ninjaBuildDir != nil {
		return c.ninjaBuildDir.Eval(c.globalVariables)
	} else {
		return "", nil
	}
}

// ModuleTypePropertyStructs returns a mapping from module type name to a list of pointers to
// property structs returned by the factory for that module type.
func (c *Context) ModuleTypePropertyStructs() map[string][]interface{} {
	ret := make(map[string][]interface{})
	for moduleType, factory := range c.moduleFactories {
		_, ret[moduleType] = factory()
	}

	return ret
}

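// ModuleName returns the name of the given Module.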
func (c *Context) ModuleName(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.Name()
}

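// ModulePath returns the path to the Blueprints file that defined the given
// Module, relative to the root of the source tree.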
func (c *Context) ModulePath(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.relBlueprintsFile
}

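// ModuleDir returns the path to the directory containing the Blueprints file
// that defined the given Module.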
func (c *Context) ModuleDir(logicModule Module) string {
	return filepath.Dir(c.ModulePath(logicModule))
}

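// ModuleSubDir returns the unique variant name of the given Module.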
func (c *Context) ModuleSubDir(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.variantName
}

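// ModuleType returns the name of the module type that was used to create the
// given Module.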
func (c *Context) ModuleType(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.typeName
}

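// BlueprintFile returns the path of the Blueprints file that defined the given
// Module.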
func (c *Context) BlueprintFile(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.relBlueprintsFile
}

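// ModuleErrorf returns a BlueprintError with the given printf-style message,
// annotated with the position of the given Module's definition in its
// Blueprints file.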
func (c *Context) ModuleErrorf(logicModule Module, format string,
	args ...interface{}) error {

	module := c.moduleInfo[logicModule]
	return &BlueprintError{
		Err: fmt.Errorf(format, args...),
		Pos: module.pos,
	}
}

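// VisitAllModules calls visit on every module variant known to the Context.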
func (c *Context) VisitAllModules(visit func(Module)) {
	c.visitAllModules(visit)
}

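// VisitAllModulesIf calls visit on every module variant for which pred
// returns true.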
func (c *Context) VisitAllModulesIf(pred func(Module) bool,
	visit func(Module)) {

	c.visitAllModulesIf(pred, visit)
}

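// VisitDirectDeps calls visit on each direct dependency of the given Module.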
func (c *Context) VisitDirectDeps(module Module, visit func(Module)) {
	topModule := c.moduleInfo[module]

	var visiting *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitDirectDeps(%s, %s) for dependency %s",
				topModule, funcName(visit), visiting))
		}
	}()

	for _, dep := range topModule.directDeps {
		visiting = dep.module
		visit(dep.module.logicModule)
	}
}

func (c *Context) VisitDirectDepsIf(module Module, pred func(Module) bool, visit func(Module)) {
	topModule := c.moduleInfo[module]

	var visiting *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitDirectDepsIf(%s, %s, %s) for dependency %s",
				topModule, funcName(pred), funcName(visit), visiting))
		}
	}()

	for _, dep := range topModule.directDeps {
		visiting = dep.module
		if pred(dep.module.logicModule) {
			visit(dep.module.logicModule)
		}
	}
}

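// VisitDepsDepthFirst calls visit once on each transitive dependency of the
// given Module, after that dependency's own dependencies have been visited.
// For example, given a *Context ctx and a Module module (placeholder names),
// a caller could collect the names of all transitive dependencies:
//
//	var depNames []string
//	ctx.VisitDepsDepthFirst(module, func(dep Module) {
//		depNames = append(depNames, ctx.ModuleName(dep))
//	})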
func (c *Context) VisitDepsDepthFirst(module Module, visit func(Module)) {
	topModule := c.moduleInfo[module]

	var visiting *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitDepsDepthFirst(%s, %s) for dependency %s",
				topModule, funcName(visit), visiting))
		}
	}()

	c.walkDeps(topModule, nil, func(dep depInfo, parent *moduleInfo) {
		visiting = dep.module
		visit(dep.module.logicModule)
	})
}

func (c *Context) VisitDepsDepthFirstIf(module Module, pred func(Module) bool, visit func(Module)) {
	topModule := c.moduleInfo[module]

	var visiting *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitDepsDepthFirstIf(%s, %s, %s) for dependency %s",
				topModule, funcName(pred), funcName(visit), visiting))
		}
	}()

	c.walkDeps(topModule, nil, func(dep depInfo, parent *moduleInfo) {
		if pred(dep.module.logicModule) {
			visiting = dep.module
			visit(dep.module.logicModule)
		}
	})
}

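// PrimaryModule returns the first variant in the given Module's group of
// variants.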
func (c *Context) PrimaryModule(module Module) Module {
	return c.moduleInfo[module].group.modules[0].logicModule
}

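// FinalModule returns the last variant in the given Module's group of
// variants.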
func (c *Context) FinalModule(module Module) Module {
	modules := c.moduleInfo[module].group.modules
	return modules[len(modules)-1].logicModule
}

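// VisitAllModuleVariants calls visit on each variant of the given Module.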
func (c *Context) VisitAllModuleVariants(module Module,
	visit func(Module)) {

	c.visitAllModuleVariants(c.moduleInfo[module], visit)
}

// WriteBuildFile writes the Ninja manifest text for the generated build
// actions to w. If this is called before PrepareBuildActions successfully
// completes then ErrBuildActionsNotReady is returned.
func (c *Context) WriteBuildFile(w io.Writer) error {
	if !c.buildActionsReady {
		return ErrBuildActionsNotReady
	}

	nw := newNinjaWriter(w)

	err := c.writeBuildFileHeader(nw)
	if err != nil {
		return err
	}

	err = c.writeNinjaRequiredVersion(nw)
	if err != nil {
		return err
	}

	// TODO: Group the globals by package.

	err = c.writeGlobalVariables(nw)
	if err != nil {
		return err
	}

	err = c.writeGlobalPools(nw)
	if err != nil {
		return err
	}

	err = c.writeBuildDir(nw)
	if err != nil {
		return err
	}

	err = c.writeGlobalRules(nw)
	if err != nil {
		return err
	}

	err = c.writeAllModuleActions(nw)
	if err != nil {
		return err
	}

	err = c.writeAllSingletonActions(nw)
	if err != nil {
		return err
	}

	return nil
}

type pkgAssociation struct {
	PkgName string
	PkgPath string
}

type pkgAssociationSorter struct {
	pkgs []pkgAssociation
}

func (s *pkgAssociationSorter) Len() int {
	return len(s.pkgs)
}

func (s *pkgAssociationSorter) Less(i, j int) bool {
	iName := s.pkgs[i].PkgName
	jName := s.pkgs[j].PkgName
	return iName < jName
}

func (s *pkgAssociationSorter) Swap(i, j int) {
	s.pkgs[i], s.pkgs[j] = s.pkgs[j], s.pkgs[i]
}

func (c *Context) writeBuildFileHeader(nw *ninjaWriter) error {
	headerTemplate := template.New("fileHeader")
	_, err := headerTemplate.Parse(fileHeaderTemplate)
	if err != nil {
		// This is a programming error.
		panic(err)
	}

	var pkgs []pkgAssociation
	maxNameLen := 0
	for pkg, name := range c.pkgNames {
		pkgs = append(pkgs, pkgAssociation{
			PkgName: name,
			PkgPath: pkg.pkgPath,
		})
		if len(name) > maxNameLen {
			maxNameLen = len(name)
		}
	}

	for i := range pkgs {
		pkgs[i].PkgName += strings.Repeat(" ", maxNameLen-len(pkgs[i].PkgName))
	}

	sort.Sort(&pkgAssociationSorter{pkgs})

	params := map[string]interface{}{
		"Pkgs": pkgs,
	}

	buf := bytes.NewBuffer(nil)
	err = headerTemplate.Execute(buf, params)
	if err != nil {
		return err
	}

	return nw.Comment(buf.String())
}

func (c *Context) writeNinjaRequiredVersion(nw *ninjaWriter) error {
	value := fmt.Sprintf("%d.%d.%d", c.requiredNinjaMajor, c.requiredNinjaMinor,
		c.requiredNinjaMicro)

	err := nw.Assign("ninja_required_version", value)
	if err != nil {
		return err
	}

	return nw.BlankLine()
}

func (c *Context) writeBuildDir(nw *ninjaWriter) error {
	if c.ninjaBuildDir != nil {
		err := nw.Assign("builddir", c.ninjaBuildDir.Value(c.pkgNames))
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}
	return nil
}

type globalEntity interface {
	fullName(pkgNames map[*packageContext]string) string
}

type globalEntitySorter struct {
	pkgNames map[*packageContext]string
	entities []globalEntity
}

func (s *globalEntitySorter) Len() int {
	return len(s.entities)
}

func (s *globalEntitySorter) Less(i, j int) bool {
	iName := s.entities[i].fullName(s.pkgNames)
	jName := s.entities[j].fullName(s.pkgNames)
	return iName < jName
}

func (s *globalEntitySorter) Swap(i, j int) {
	s.entities[i], s.entities[j] = s.entities[j], s.entities[i]
}

func (c *Context) writeGlobalVariables(nw *ninjaWriter) error {
	visited := make(map[Variable]bool)

	var walk func(v Variable) error
	walk = func(v Variable) error {
		visited[v] = true

		// First visit variables on which this variable depends.
		value := c.globalVariables[v]
		for _, dep := range value.variables {
			if !visited[dep] {
				err := walk(dep)
				if err != nil {
					return err
				}
			}
		}

		err := nw.Assign(v.fullName(c.pkgNames), value.Value(c.pkgNames))
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}

		return nil
	}

	globalVariables := make([]globalEntity, 0, len(c.globalVariables))
	for variable := range c.globalVariables {
		globalVariables = append(globalVariables, variable)
	}

	sort.Sort(&globalEntitySorter{c.pkgNames, globalVariables})

	for _, entity := range globalVariables {
		v := entity.(Variable)
		if !visited[v] {
			err := walk(v)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func (c *Context) writeGlobalPools(nw *ninjaWriter) error {
	globalPools := make([]globalEntity, 0, len(c.globalPools))
	for pool := range c.globalPools {
		globalPools = append(globalPools, pool)
	}

	sort.Sort(&globalEntitySorter{c.pkgNames, globalPools})

	for _, entity := range globalPools {
		pool := entity.(Pool)
		name := pool.fullName(c.pkgNames)
		def := c.globalPools[pool]
		err := def.WriteTo(nw, name)
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}

	return nil
}

func (c *Context) writeGlobalRules(nw *ninjaWriter) error {
	globalRules := make([]globalEntity, 0, len(c.globalRules))
	for rule := range c.globalRules {
		globalRules = append(globalRules, rule)
	}

	sort.Sort(&globalEntitySorter{c.pkgNames, globalRules})

	for _, entity := range globalRules {
		rule := entity.(Rule)
		name := rule.fullName(c.pkgNames)
		def := c.globalRules[rule]
		err := def.WriteTo(nw, name, c.pkgNames)
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}

	return nil
}

type depSorter []depInfo

func (s depSorter) Len() int {
	return len(s)
}

func (s depSorter) Less(i, j int) bool {
	iName := s[i].module.Name()
	jName := s[j].module.Name()
	if iName == jName {
		iName = s[i].module.variantName
		jName = s[j].module.variantName
	}
	return iName < jName
}

func (s depSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

type moduleSorter struct {
	modules       []*moduleInfo
	nameInterface NameInterface
}

func (s moduleSorter) Len() int {
	return len(s.modules)
}

func (s moduleSorter) Less(i, j int) bool {
	iMod := s.modules[i]
	jMod := s.modules[j]
	iName := s.nameInterface.UniqueName(newNamespaceContext(iMod), iMod.group.name)
	jName := s.nameInterface.UniqueName(newNamespaceContext(jMod), jMod.group.name)
	if iName == jName {
		iName = s.modules[i].variantName
		jName = s.modules[j].variantName
	}

	if iName == jName {
		panic(fmt.Sprintf("duplicate module name: %s: %#v and %#v\n", iName, iMod, jMod))
	}
	return iName < jName
}

func (s moduleSorter) Swap(i, j int) {
	s.modules[i], s.modules[j] = s.modules[j], s.modules[i]
}

func (c *Context) writeAllModuleActions(nw *ninjaWriter) error {
	headerTemplate := template.New("moduleHeader")
	_, err := headerTemplate.Parse(moduleHeaderTemplate)
	if err != nil {
		// This is a programming error.
		panic(err)
	}

	modules := make([]*moduleInfo, 0, len(c.moduleInfo))
	for _, module := range c.moduleInfo {
		modules = append(modules, module)
	}
	sort.Sort(moduleSorter{modules, c.nameInterface})

	buf := bytes.NewBuffer(nil)

	for _, module := range modules {
		if len(module.actionDefs.variables)+len(module.actionDefs.rules)+len(module.actionDefs.buildDefs) == 0 {
			continue
		}

		buf.Reset()

		// In order to make the bootstrap build manifest independent of the
		// build dir we need to output the Blueprints file locations in the
		// comments as paths relative to the source directory.
		relPos := module.pos
		relPos.Filename = module.relBlueprintsFile

		// Get the name and location of the factory function for the module.
		factoryFunc := runtime.FuncForPC(reflect.ValueOf(module.factory).Pointer())
		factoryName := factoryFunc.Name()

		infoMap := map[string]interface{}{
			"name":      module.Name(),
			"typeName":  module.typeName,
			"goFactory": factoryName,
			"pos":       relPos,
			"variant":   module.variantName,
		}
		err = headerTemplate.Execute(buf, infoMap)
		if err != nil {
			return err
		}

		err = nw.Comment(buf.String())
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}

		err = c.writeLocalBuildActions(nw, &module.actionDefs)
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}

	return nil
}

func (c *Context) writeAllSingletonActions(nw *ninjaWriter) error {
	headerTemplate := template.New("singletonHeader")
	_, err := headerTemplate.Parse(singletonHeaderTemplate)
	if err != nil {
		// This is a programming error.
		panic(err)
	}

	buf := bytes.NewBuffer(nil)

	for _, info := range c.singletonInfo {
		if len(info.actionDefs.variables)+len(info.actionDefs.rules)+len(info.actionDefs.buildDefs) == 0 {
			continue
		}

		// Get the name of the factory function for the singleton.
		factory := info.factory
		factoryFunc := runtime.FuncForPC(reflect.ValueOf(factory).Pointer())
		factoryName := factoryFunc.Name()

		buf.Reset()
		infoMap := map[string]interface{}{
			"name":      info.name,
			"goFactory": factoryName,
		}
		err = headerTemplate.Execute(buf, infoMap)
		if err != nil {
			return err
		}

		err = nw.Comment(buf.String())
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}

		err = c.writeLocalBuildActions(nw, &info.actionDefs)
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}

	return nil
}

func (c *Context) writeLocalBuildActions(nw *ninjaWriter,
	defs *localBuildActions) error {

	// Write the local variable assignments.
	for _, v := range defs.variables {
		// A localVariable doesn't need the package names or config to
		// determine its name or value.
		name := v.fullName(nil)
		value, err := v.value(nil)
		if err != nil {
			panic(err)
		}
		err = nw.Assign(name, value.Value(c.pkgNames))
		if err != nil {
			return err
		}
	}

	if len(defs.variables) > 0 {
		err := nw.BlankLine()
		if err != nil {
			return err
		}
	}

	// Write the local rules.
	for _, r := range defs.rules {
		// A localRule doesn't need the package names or config to determine
		// its name or definition.
		name := r.fullName(nil)
		def, err := r.def(nil)
		if err != nil {
			panic(err)
		}

		err = def.WriteTo(nw, name, c.pkgNames)
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}

	// Write the build definitions.
	for _, buildDef := range defs.buildDefs {
		err := buildDef.WriteTo(nw, c.pkgNames)
		if err != nil {
			return err
		}

		if len(buildDef.Args) > 0 {
			err = nw.BlankLine()
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func beforeInModuleList(a, b *moduleInfo, list []*moduleInfo) bool {
	found := false
	if a == b {
		return false
	}
	for _, l := range list {
		if l == a {
			found = true
		} else if l == b {
			return found
		}
	}

	missing := a
	if found {
		missing = b
	}
	panic(fmt.Errorf("element %v not found in list %v", missing, list))
}

type panicError struct {
	panic interface{}
	stack []byte
	in    string
}

func newPanicErrorf(panic interface{}, in string, a ...interface{}) error {
	buf := make([]byte, 4096)
	count := runtime.Stack(buf, false)
	return panicError{
		panic: panic,
		in:    fmt.Sprintf(in, a...),
		stack: buf[:count],
	}
}

func (p panicError) Error() string {
	return fmt.Sprintf("panic in %s\n%s\n%s\n", p.in, p.panic, p.stack)
}

func (p *panicError) addIn(in string) {
	p.in += " in " + in
}

func funcName(f interface{}) string {
	return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
}

var fileHeaderTemplate = `******************************************************************************
***            This file is generated and should not be edited            ***
******************************************************************************
{{if .Pkgs}}
This file contains variables, rules, and pools with name prefixes indicating
they were generated by the following Go packages:
{{range .Pkgs}}
    {{.PkgName}} [from Go package {{.PkgPath}}]{{end}}{{end}}

`

var moduleHeaderTemplate = `# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
Module:  {{.name}}
Variant: {{.variant}}
Type:    {{.typeName}}
Factory: {{.goFactory}}
Defined: {{.pos}}
`

var singletonHeaderTemplate = `# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
Singleton: {{.name}}
Factory:   {{.goFactory}}
`