Create a highmem pool and put metalava into it

Create a highmem pool based on the total RAM and the number of CPUs,
with an override via the NINJA_HIGHMEM_NUM_JOBS variable.  Put
metalava into the highmem pool.

Ninja does not support nested pools, and when goma or RBE is enabled
the maximum ninja parallelism is set very high, with local jobs
restricted to a local pool.  When both the local pool and the highmem
pool are in use, the total number of running local jobs can be as
high as the sum of the sizes of the two pools.  Keep the highmem pool
limited to 1/16th of the local pool when remote builds are enabled to
minimize the extra parallelism while still limiting highmem jobs.
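
As a worked example (illustrative numbers, not taken from this
change): with remote builds enabled and a local parallelism of 64,
the highmem pool is sized to (64 + 15) / 16 = 4 jobs, so the
worst-case total of locally running jobs is 68 rather than 64 plus
the full RAM-based highmem count.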

Fixes: 142644983
Test: m nothing, examine pools
Test: m USE_GOMA=true nothing, examine pools
Change-Id: Id79f11f44948992960ac34ecf831dacbe21bd332
Author: Colin Cross
Date:   2019-11-15 13:18:43 -08:00
parent b1d8c99e12
commit 8b8bec3b3a
13 changed files with 95 additions and 13 deletions

@@ -827,6 +827,10 @@ func (c *config) UseRBE() bool {
return Bool(c.productVariables.UseRBE)
}
func (c *config) UseRemoteBuild() bool {
return c.UseGoma() || c.UseRBE()
}
func (c *config) RunErrorProne() bool {
return c.IsEnvTrue("RUN_ERROR_PRONE")
}

@@ -99,6 +99,9 @@ var (
// Used only when USE_GOMA=true is set, to restrict non-goma jobs to the local parallelism value
localPool = blueprint.NewBuiltinPool("local_pool")
// Used for processes that need significant RAM to ensure there are not too many running in parallel.
highmemPool = blueprint.NewBuiltinPool("highmem_pool")
)
func init() {

@@ -1338,7 +1338,7 @@ func (m *moduleContext) Variable(pctx PackageContext, name, value string) {
func (m *moduleContext) Rule(pctx PackageContext, name string, params blueprint.RuleParams,
argNames ...string) blueprint.Rule {
if (m.config.UseGoma() || m.config.UseRBE()) && params.Pool == nil {
if m.config.UseRemoteBuild() && params.Pool == nil {
// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by goma/RBE, restrict
// jobs to the local parallelism value
params.Pool = localPool

@@ -109,7 +109,7 @@ func (p PackageContext) RuleFunc(name string,
if len(ctx.errors) > 0 {
return params, ctx.errors[0]
}
if (ctx.Config().UseGoma() || ctx.Config().UseRBE()) && params.Pool == nil {
if ctx.Config().UseRemoteBuild() && params.Pool == nil {
// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by
// goma/RBE, restrict jobs to the local parallelism value
params.Pool = localPool

@@ -33,6 +33,8 @@ type RuleBuilder struct {
temporariesSet map[WritablePath]bool
restat bool
sbox bool
highmem bool
remoteable RemoteRuleSupports
sboxOutDir WritablePath
missingDeps []string
}
@@ -87,6 +89,19 @@ func (r *RuleBuilder) Restat() *RuleBuilder {
return r
}
// HighMem marks the rule as a high memory rule, which will limit how many run in parallel with other high memory
// rules.
func (r *RuleBuilder) HighMem() *RuleBuilder {
r.highmem = true
return r
}
// Remoteable marks the rule as supporting remote execution.
func (r *RuleBuilder) Remoteable(supports RemoteRuleSupports) *RuleBuilder {
r.remoteable = supports
return r
}
// Sbox marks the rule as needing to be wrapped by sbox. The WritablePath should point to the output
// directory that sbox will wipe. It should not be written to by any other rule. sbox will ensure
// that all outputs have been written, and will discard any output files that were not specified.
@@ -401,6 +416,17 @@ func (r *RuleBuilder) Build(pctx PackageContext, ctx BuilderContext, name string
rspFileContent = "$in"
}
var pool blueprint.Pool
if ctx.Config().UseGoma() && r.remoteable&SUPPORTS_GOMA != 0 {
// When USE_GOMA=true is set and the rule is supported by goma, allow jobs to run outside the local pool.
} else if ctx.Config().UseRBE() && r.remoteable&SUPPORTS_RBE != 0 {
// When USE_RBE=true is set and the rule is supported by RBE, allow jobs to run outside the local pool.
} else if r.highmem {
pool = highmemPool
} else if ctx.Config().UseRemoteBuild() {
pool = localPool
}
ctx.Build(pctx, BuildParams{
Rule: ctx.Rule(pctx, name, blueprint.RuleParams{
Command: commandString,
@@ -408,6 +434,7 @@ func (r *RuleBuilder) Build(pctx PackageContext, ctx BuilderContext, name string
Restat: r.restat,
Rspfile: rspFile,
RspfileContent: rspFileContent,
Pool: pool,
}),
Inputs: rspFileInputs,
Implicits: r.Inputs(),
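
A minimal usage sketch (hypothetical rule, tool, and variable names,
not part of this change), assuming ctx, pctx, and the input/output
paths come from the enclosing module: a rule opts into the highmem
pool simply by calling HighMem() before Build, and the pool selection
above does the rest.

rule := android.NewRuleBuilder()
rule.HighMem()
rule.Command().
	BuiltTool(ctx, "my-memory-hungry-tool").
	FlagWithInput("--in ", srcFile).
	FlagWithOutput("--out ", outFile)
rule.Build(pctx, ctx, "myHighmemRule", "running my-memory-hungry-tool")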

@@ -128,7 +128,7 @@ func (s *singletonContextAdaptor) Variable(pctx PackageContext, name, value stri
}
func (s *singletonContextAdaptor) Rule(pctx PackageContext, name string, params blueprint.RuleParams, argNames ...string) blueprint.Rule {
if (s.Config().UseGoma() || s.Config().UseRBE()) && params.Pool == nil {
if s.Config().UseRemoteBuild() && params.Pool == nil {
// When USE_GOMA=true or USE_RBE=true are set and the rule is not supported by goma/RBE, restrict
// jobs to the local parallelism value
params.Pool = localPool

View file

@@ -174,6 +174,10 @@ func main() {
stat.AddOutput(status.NewProtoErrorLog(log, filepath.Join(logsDir, c.logsPrefix+"build_error")))
stat.AddOutput(status.NewCriticalPath(log))
buildCtx.Verbosef("Detected %.3v GB total RAM", float32(config.TotalRAM())/(1024*1024*1024))
buildCtx.Verbosef("Parallelism (local/remote/highmem): %v/%v/%v",
config.Parallel(), config.RemoteParallel(), config.HighmemParallel())
defer met.Dump(filepath.Join(logsDir, c.logsPrefix+"soong_metrics"))
if start, ok := os.LookupEnv("TRACE_BEGIN_SOONG"); ok {
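
With verbose output enabled, the new log lines would read something
like this for a goma build on a 64 GB machine (illustrative values):

Detected 62.9 GB total RAM
Parallelism (local/remote/highmem): 64/500/4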

@@ -1456,6 +1456,8 @@ func (d *Droidstubs) apiToXmlFlags(ctx android.ModuleContext, cmd *android.RuleB
func metalavaCmd(ctx android.ModuleContext, rule *android.RuleBuilder, javaVersion javaVersion, srcs android.Paths,
srcJarList android.Path, bootclasspath, classpath classpath, sourcepaths android.Paths) *android.RuleBuilderCommand {
// Metalava uses lots of memory, restrict the number of metalava jobs that can run in parallel.
rule.HighMem()
cmd := rule.Command().BuiltTool(ctx, "metalava").
Flag(config.JavacVmFlags).
FlagWithArg("-encoding ", "UTF-8").

@@ -46,8 +46,11 @@ func SetupOutDir(ctx Context, config Config) {
var combinedBuildNinjaTemplate = template.Must(template.New("combined").Parse(`
builddir = {{.OutDir}}
pool local_pool
{{if .UseRemoteBuild }}pool local_pool
depth = {{.Parallel}}
{{end -}}
pool highmem_pool
depth = {{.HighmemParallel}}
build _kati_always_build_: phony
{{if .HasKatiSuffix}}subninja {{.KatiBuildNinjaFile}}
subninja {{.KatiPackageNinjaFile}}
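
With USE_GOMA=true or USE_RBE=true the generated combined ninja file
under out/ would then declare both pools, e.g. with illustrative
depths:

pool local_pool
 depth = 64
pool highmem_pool
 depth = 4

For a purely local build only highmem_pool is declared, since overall
parallelism is already capped by the -j value passed to ninja.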

@@ -722,6 +722,33 @@ func (c *configImpl) Parallel() int {
return c.parallel
}
func (c *configImpl) HighmemParallel() int {
if i, ok := c.environ.GetInt("NINJA_HIGHMEM_NUM_JOBS"); ok {
return i
}
const minMemPerHighmemProcess = 8 * 1024 * 1024 * 1024
parallel := c.Parallel()
if c.UseRemoteBuild() {
// Ninja doesn't support nested pools, and when remote builds are enabled the total ninja parallelism
// is set very high (i.e. 500). Using a large value here would cause the total number of running jobs
// to be the sum of the sizes of the local and highmem pools, which will cause extra CPU contention.
// Return 1/16th of the size of the local pool, rounding up.
return (parallel + 15) / 16
} else if c.totalRAM == 0 {
// Couldn't detect the total RAM, don't restrict highmem processes.
return parallel
} else if c.totalRAM <= 32*1024*1024*1024 {
// Less than 32GB of ram, restrict to 2 highmem processes
return 2
} else if p := int(c.totalRAM / minMemPerHighmemProcess); p < parallel {
// If less than 8GB total RAM per process, reduce the number of highmem processes
return p
}
// No restriction on highmem processes
return parallel
}
func (c *configImpl) TotalRAM() uint64 {
return c.totalRAM
}
@@ -782,10 +809,11 @@ func (c *configImpl) UseRemoteBuild() bool {
// gomacc) are run in parallel. Note the parallelism of all other jobs is
// still limited by Parallel()
func (c *configImpl) RemoteParallel() int {
if v, ok := c.environ.Get("NINJA_REMOTE_NUM_JOBS"); ok {
if i, err := strconv.Atoi(v); err == nil {
return i
}
if !c.UseRemoteBuild() {
return 0
}
if i, ok := c.environ.GetInt("NINJA_REMOTE_NUM_JOBS"); ok {
return i
}
return 500
}
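
If the heuristic picks a poor value for a particular machine, the
highmem job count can be pinned explicitly via the environment
override checked first above, e.g. (hypothetical invocation, assuming
the variable is passed through to the build environment):

NINJA_HIGHMEM_NUM_JOBS=2 m nothing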

@@ -22,7 +22,7 @@ import (
func detectTotalRAM(ctx Context) uint64 {
s, err := syscall.Sysctl("hw.memsize")
if err != nil {
ctx.Printf("Failed to get system memory size: %s")
ctx.Printf("Failed to get system memory size: %v", err)
return 0
}
@@ -32,7 +32,7 @@ func detectTotalRAM(ctx Context) uint64 {
}
if len(s) != 8 {
ctx.Printf("Failed to get system memory size, returned %d bytes, 8", len(s))
ctx.Printf("Failed to get system memory size, returned %d bytes, expecting 8 bytes", len(s))
return 0
}

@@ -20,9 +20,8 @@ func detectTotalRAM(ctx Context) uint64 {
var info syscall.Sysinfo_t
err := syscall.Sysinfo(&info)
if err != nil {
ctx.Printf("Failed to get system memory size: %s")
ctx.Printf("Failed to get system memory size: %v", err)
return 0
}
memBytes := uint64(info.Totalram) * uint64(info.Unit)
return memBytes
return uint64(info.Totalram) * uint64(info.Unit)
}

@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"os"
"strconv"
"strings"
)
@@ -44,6 +45,17 @@ func (e *Environment) Get(key string) (string, bool) {
return "", false
}
// GetInt returns the int value associated with the key, and whether it exists
// and is a valid int.
func (e *Environment) GetInt(key string) (int, bool) {
if v, ok := e.Get(key); ok {
if i, err := strconv.Atoi(v); err == nil {
return i, true
}
}
return 0, false
}
// Set sets the value associated with the key, overwriting the current value
// if it exists.
func (e *Environment) Set(key, value string) {