diff --git a/android/config.go b/android/config.go
index 1cb543db9..88bc61ae1 100644
--- a/android/config.go
+++ b/android/config.go
@@ -827,6 +827,10 @@ func (c *config) UseRBE() bool {
 	return Bool(c.productVariables.UseRBE)
 }
 
+func (c *config) UseRemoteBuild() bool {
+	return c.UseGoma() || c.UseRBE()
+}
+
 func (c *config) RunErrorProne() bool {
 	return c.IsEnvTrue("RUN_ERROR_PRONE")
 }
diff --git a/android/defs.go b/android/defs.go
index 4890c6618..5c815e64e 100644
--- a/android/defs.go
+++ b/android/defs.go
@@ -99,6 +99,9 @@ var (
 	// Used only when USE_GOMA=true is set, to restrict non-goma jobs to the local parallelism value
 	localPool = blueprint.NewBuiltinPool("local_pool")
+
+	// Used for processes that need significant RAM to ensure there are not too many running in parallel.
+	highmemPool = blueprint.NewBuiltinPool("highmem_pool")
 )
 
 func init() {
diff --git a/android/module.go b/android/module.go
index 05115d678..dd0cf8cd0 100644
--- a/android/module.go
+++ b/android/module.go
@@ -1338,7 +1338,7 @@ func (m *moduleContext) Variable(pctx PackageContext, name, value string) {
 func (m *moduleContext) Rule(pctx PackageContext, name string,
 	params blueprint.RuleParams, argNames ...string) blueprint.Rule {
 
-	if (m.config.UseGoma() || m.config.UseRBE()) && params.Pool == nil {
+	if m.config.UseRemoteBuild() && params.Pool == nil {
 		// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by goma/RBE, restrict
 		// jobs to the local parallelism value
 		params.Pool = localPool
diff --git a/android/package_ctx.go b/android/package_ctx.go
index a22891081..635066767 100644
--- a/android/package_ctx.go
+++ b/android/package_ctx.go
@@ -109,7 +109,7 @@ func (p PackageContext) RuleFunc(name string,
 		if len(ctx.errors) > 0 {
 			return params, ctx.errors[0]
 		}
-		if (ctx.Config().UseGoma() || ctx.Config().UseRBE()) && params.Pool == nil {
+		if ctx.Config().UseRemoteBuild() && params.Pool == nil {
 			// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by
 			// goma/RBE, restrict jobs to the local parallelism value
 			params.Pool = localPool
diff --git a/android/rule_builder.go b/android/rule_builder.go
index 6f04672d5..928ba532d 100644
--- a/android/rule_builder.go
+++ b/android/rule_builder.go
@@ -33,6 +33,8 @@ type RuleBuilder struct {
 	temporariesSet map[WritablePath]bool
 	restat         bool
 	sbox           bool
+	highmem        bool
+	remoteable     RemoteRuleSupports
 	sboxOutDir     WritablePath
 	missingDeps    []string
 }
@@ -87,6 +89,19 @@ func (r *RuleBuilder) Restat() *RuleBuilder {
 	return r
 }
 
+// HighMem marks the rule as a high memory rule, which will limit how many run in parallel with other high memory
+// rules.
+func (r *RuleBuilder) HighMem() *RuleBuilder {
+	r.highmem = true
+	return r
+}
+
+// Remoteable marks the rule as supporting remote execution.
+func (r *RuleBuilder) Remoteable(supports RemoteRuleSupports) *RuleBuilder {
+	r.remoteable = supports
+	return r
+}
+
 // Sbox marks the rule as needing to be wrapped by sbox. The WritablePath should point to the output
 // directory that sbox will wipe. It should not be written to by any other rule. sbox will ensure
 // that all outputs have been written, and will discard any output files that were not specified.
@@ -401,6 +416,17 @@ func (r *RuleBuilder) Build(pctx PackageContext, ctx BuilderContext, name string
 		rspFileContent = "$in"
 	}
 
+	var pool blueprint.Pool
+	if ctx.Config().UseGoma() && r.remoteable&SUPPORTS_GOMA != 0 {
+		// When USE_GOMA=true is set and the rule is supported by goma, allow jobs to run outside the local pool.
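+		// (Leaving pool nil attaches no pool to the rule, so ninja runs it in the
+		// default pool, limited only by the overall -j parallelism.)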
+	} else if ctx.Config().UseRBE() && r.remoteable&SUPPORTS_RBE != 0 {
+		// When USE_RBE=true is set and the rule is supported by RBE, allow jobs to run outside the local pool.
+	} else if r.highmem {
+		pool = highmemPool
+	} else if ctx.Config().UseRemoteBuild() {
+		pool = localPool
+	}
+
 	ctx.Build(pctx, BuildParams{
 		Rule: ctx.Rule(pctx, name, blueprint.RuleParams{
 			Command:        commandString,
@@ -408,6 +434,7 @@ func (r *RuleBuilder) Build(pctx PackageContext, ctx BuilderContext, name string
 			Restat:         r.restat,
 			Rspfile:        rspFile,
 			RspfileContent: rspFileContent,
+			Pool:           pool,
 		}),
 		Inputs:    rspFileInputs,
 		Implicits: r.Inputs(),
diff --git a/android/singleton.go b/android/singleton.go
index 91268ad1c..45a9b8243 100644
--- a/android/singleton.go
+++ b/android/singleton.go
@@ -128,7 +128,7 @@ func (s *singletonContextAdaptor) Variable(pctx PackageContext, name, value stri
 }
 
 func (s *singletonContextAdaptor) Rule(pctx PackageContext, name string, params blueprint.RuleParams, argNames ...string) blueprint.Rule {
-	if (s.Config().UseGoma() || s.Config().UseRBE()) && params.Pool == nil {
+	if s.Config().UseRemoteBuild() && params.Pool == nil {
 		// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by goma/RBE, restrict
 		// jobs to the local parallelism value
 		params.Pool = localPool
diff --git a/cmd/soong_ui/main.go b/cmd/soong_ui/main.go
index 974c644aa..db61fba5c 100644
--- a/cmd/soong_ui/main.go
+++ b/cmd/soong_ui/main.go
@@ -174,6 +174,10 @@ func main() {
 	stat.AddOutput(status.NewProtoErrorLog(log, filepath.Join(logsDir, c.logsPrefix+"build_error")))
 	stat.AddOutput(status.NewCriticalPath(log))
 
+	buildCtx.Verbosef("Detected %.3v GB total RAM", float32(config.TotalRAM())/(1024*1024*1024))
+	buildCtx.Verbosef("Parallelism (local/remote/highmem): %v/%v/%v",
+		config.Parallel(), config.RemoteParallel(), config.HighmemParallel())
+
 	defer met.Dump(filepath.Join(logsDir, c.logsPrefix+"soong_metrics"))
 
 	if start, ok := os.LookupEnv("TRACE_BEGIN_SOONG"); ok {
diff --git a/java/droiddoc.go b/java/droiddoc.go
index f62f5f901..a10ec8173 100644
--- a/java/droiddoc.go
+++ b/java/droiddoc.go
@@ -1456,6 +1456,8 @@ func (d *Droidstubs) apiToXmlFlags(ctx android.ModuleContext, cmd *android.RuleB
 func metalavaCmd(ctx android.ModuleContext, rule *android.RuleBuilder, javaVersion javaVersion, srcs android.Paths,
 	srcJarList android.Path, bootclasspath, classpath classpath, sourcepaths android.Paths) *android.RuleBuilderCommand {
 
+	// Metalava uses lots of memory, restrict the number of metalava jobs that can run in parallel.
+	rule.HighMem()
 	cmd := rule.Command().BuiltTool(ctx, "metalava").
 		Flag(config.JavacVmFlags).
 		FlagWithArg("-encoding ", "UTF-8").
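For a sense of how a module opts in to these hooks, here is a minimal sketch in the style of the metalavaCmd change above. The bigToolCmd helper and the "bigtool" tool name are hypothetical; SUPPORTS_RBE is the RemoteRuleSupports flag tested in rule_builder.go, assumed to be exported from the android package:

```go
package java

import "android/soong/android"

// bigToolCmd marks its rule as high-memory, so ninja admits it through
// highmem_pool, and declares RBE support, so with USE_RBE=true the job may
// run outside the restricted local pool.
func bigToolCmd(ctx android.ModuleContext, rule *android.RuleBuilder) *android.RuleBuilderCommand {
	rule.HighMem().Remoteable(android.SUPPORTS_RBE)
	return rule.Command().BuiltTool(ctx, "bigtool")
}
```

Note the precedence in Build() above: a rule that is both remoteable and high-memory escapes to the remote execution path first, and only falls back to highmem_pool when remote execution is disabled or unsupported.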
diff --git a/ui/build/build.go b/ui/build/build.go
index 1c2d86481..de36dce9a 100644
--- a/ui/build/build.go
+++ b/ui/build/build.go
@@ -46,8 +46,11 @@ func SetupOutDir(ctx Context, config Config) {
 var combinedBuildNinjaTemplate = template.Must(template.New("combined").Parse(`
 builddir = {{.OutDir}}
-pool local_pool
+{{if .UseRemoteBuild }}pool local_pool
  depth = {{.Parallel}}
+{{end -}}
+pool highmem_pool
+ depth = {{.HighmemParallel}}
 build _kati_always_build_: phony
 {{if .HasKatiSuffix}}subninja {{.KatiBuildNinjaFile}}
 subninja {{.KatiPackageNinjaFile}}
diff --git a/ui/build/config.go b/ui/build/config.go
index c0841713a..9b19ede3a 100644
--- a/ui/build/config.go
+++ b/ui/build/config.go
@@ -722,6 +722,33 @@ func (c *configImpl) Parallel() int {
 	return c.parallel
 }
 
+func (c *configImpl) HighmemParallel() int {
+	if i, ok := c.environ.GetInt("NINJA_HIGHMEM_NUM_JOBS"); ok {
+		return i
+	}
+
+	const minMemPerHighmemProcess = 8 * 1024 * 1024 * 1024
+	parallel := c.Parallel()
+	if c.UseRemoteBuild() {
+		// Ninja doesn't support nested pools, and when remote builds are enabled the total ninja parallelism
+		// is set very high (i.e. 500). Using a large value here would cause the total number of running jobs
+		// to be the sum of the sizes of the local and highmem pools, which will cause extra CPU contention.
+		// Return 1/16th of the size of the local pool, rounding up.
+		return (parallel + 15) / 16
+	} else if c.totalRAM == 0 {
+		// Couldn't detect the total RAM, don't restrict highmem processes.
+		return parallel
+	} else if c.totalRAM <= 32*1024*1024*1024 {
+		// Less than 32GB of RAM, restrict to 2 highmem processes
+		return 2
+	} else if p := int(c.totalRAM / minMemPerHighmemProcess); p < parallel {
+		// If less than 8GB total RAM per process, reduce the number of highmem processes
+		return p
+	}
+	// No restriction on highmem processes
+	return parallel
+}
+
 func (c *configImpl) TotalRAM() uint64 {
 	return c.totalRAM
 }
@@ -782,10 +809,11 @@ func (c *configImpl) UseRemoteBuild() bool {
 // gomacc) are run in parallel.  Note the parallelism of all other jobs is
 // still limited by Parallel()
 func (c *configImpl) RemoteParallel() int {
-	if v, ok := c.environ.Get("NINJA_REMOTE_NUM_JOBS"); ok {
-		if i, err := strconv.Atoi(v); err == nil {
-			return i
-		}
+	if !c.UseRemoteBuild() {
+		return 0
+	}
+	if i, ok := c.environ.GetInt("NINJA_REMOTE_NUM_JOBS"); ok {
+		return i
 	}
 	return 500
 }
diff --git a/ui/build/config_darwin.go b/ui/build/config_darwin.go
index 480d8d1fb..fe74e31da 100644
--- a/ui/build/config_darwin.go
+++ b/ui/build/config_darwin.go
@@ -22,7 +22,7 @@ import (
 func detectTotalRAM(ctx Context) uint64 {
 	s, err := syscall.Sysctl("hw.memsize")
 	if err != nil {
-		ctx.Printf("Failed to get system memory size: %s")
+		ctx.Printf("Failed to get system memory size: %v", err)
 		return 0
 	}
 
@@ -32,7 +32,7 @@ func detectTotalRAM(ctx Context) uint64 {
 	}
 
 	if len(s) != 8 {
-		ctx.Printf("Failed to get system memory size, returned %d bytes, 8", len(s))
+		ctx.Printf("Failed to get system memory size, returned %d bytes, expecting 8 bytes", len(s))
 		return 0
 	}
diff --git a/ui/build/config_linux.go b/ui/build/config_linux.go
index 9e1bdc7f7..162d372d0 100644
--- a/ui/build/config_linux.go
+++ b/ui/build/config_linux.go
@@ -20,9 +20,8 @@ func detectTotalRAM(ctx Context) uint64 {
 	var info syscall.Sysinfo_t
 	err := syscall.Sysinfo(&info)
 	if err != nil {
-		ctx.Printf("Failed to get system memory size: %s")
+		ctx.Printf("Failed to get system memory size: %v", err)
 		return 0
 	}
-	memBytes := uint64(info.Totalram) * uint64(info.Unit)
-	return memBytes
+	return uint64(info.Totalram) * uint64(info.Unit)
 }
diff --git a/ui/build/environment.go b/ui/build/environment.go
index d8ff7f207..9bca7c06d 100644
--- a/ui/build/environment.go
+++ b/ui/build/environment.go
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"strconv"
 	"strings"
 )
 
@@ -44,6 +45,17 @@ func (e *Environment) Get(key string) (string, bool) {
 	return "", false
 }
 
+// GetInt returns the int value associated with the key, and whether it exists
+// and is a valid int.
+func (e *Environment) GetInt(key string) (int, bool) {
+	if v, ok := e.Get(key); ok {
+		if i, err := strconv.Atoi(v); err == nil {
+			return i, true
+		}
+	}
+	return 0, false
+}
+
 // Set sets the value associated with the key, overwriting the current value
 // if it exists.
 func (e *Environment) Set(key, value string) {
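One subtlety of the new GetInt helper: a key that is set but not a valid integer is reported the same as an unset key, which is what lets HighmemParallel() and RemoteParallel() fall through to their computed defaults. A sketch of a unit test for this behavior (not part of the change itself; it assumes Environment is the slice of KEY=value strings that Get already parses):

```go
package build

import "testing"

func TestEnvironmentGetInt(t *testing.T) {
	env := Environment{"NINJA_HIGHMEM_NUM_JOBS=4", "NINJA_REMOTE_NUM_JOBS=lots"}

	// A valid integer value is parsed and reported as present.
	if i, ok := env.GetInt("NINJA_HIGHMEM_NUM_JOBS"); !ok || i != 4 {
		t.Errorf("GetInt(NINJA_HIGHMEM_NUM_JOBS) = %d, %v; want 4, true", i, ok)
	}
	// A value that doesn't parse as an int is treated the same as unset.
	if _, ok := env.GetInt("NINJA_REMOTE_NUM_JOBS"); ok {
		t.Error("GetInt(NINJA_REMOTE_NUM_JOBS) should report false for a non-integer value")
	}
	// A missing key reports false as well.
	if _, ok := env.GetInt("UNSET_KEY"); ok {
		t.Error("GetInt(UNSET_KEY) should report false for an unset key")
	}
}
```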