// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

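// This tool builds many Android products in parallel, each in its own
// OUT_DIR, running product config, Soong, and Kati while skipping Ninja
// execution. Per-product logs are collected under a shared logs directory,
// and failures are counted and reported when all products have finished.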
package main

import (
	"context"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"android/soong/finder"
	"android/soong/ui/build"
	"android/soong/ui/logger"
	"android/soong/ui/status"
	"android/soong/ui/terminal"
	"android/soong/ui/tracer"
	"android/soong/zip"
)

var numJobs = flag.Int("j", 0, "number of parallel jobs [0=autodetect]")

var keepArtifacts = flag.Bool("keep", false, "keep archives of artifacts")
var incremental = flag.Bool("incremental", false, "run in incremental mode (saving intermediates)")

var outDir = flag.String("out", "", "path to store output directories (defaults to tmpdir under $OUT when empty)")
var alternateResultDir = flag.Bool("dist", false, "write select results to $DIST_DIR (or <out>/dist when empty)")

var onlyConfig = flag.Bool("only-config", false, "Only run product config (not Soong or Kati)")
var onlySoong = flag.Bool("only-soong", false, "Only run product config and Soong (not Kati)")

var buildVariant = flag.String("variant", "eng", "build variant to use")

var shardCount = flag.Int("shard-count", 1, "split the products into multiple shards (to spread the build onto multiple machines, etc)")
var shard = flag.Int("shard", 1, "1-indexed shard to execute")

var skipProducts multipleStringArg
var includeProducts multipleStringArg

func init() {
	flag.Var(&skipProducts, "skip-products", "comma-separated list of products to skip (known failures, etc)")
	flag.Var(&includeProducts, "products", "comma-separated list of products to build")
}

// multipleStringArg is a flag.Value that takes comma separated lists and converts them to a
// []string.  The argument can be passed multiple times to append more values.
type multipleStringArg []string

func (m *multipleStringArg) String() string {
	return strings.Join(*m, `, `)
}

func (m *multipleStringArg) Set(s string) error {
	*m = append(*m, strings.Split(s, ",")...)
	return nil
}

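// Number of lines to keep from the start and the end of a failing product's
// log when quoting it in the status output; the middle of longer logs is
// elided.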
const errorLeadingLines = 20
const errorTrailingLines = 20

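// errMsgFromLog returns the contents of the given log file as a "> "-prefixed
// excerpt, keeping only the first errorLeadingLines and last errorTrailingLines
// lines (with an elision marker) when the log is longer than that. It returns
// "" if filename is empty or the file cannot be read.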
func errMsgFromLog(filename string) string {
	if filename == "" {
		return ""
	}

	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return ""
	}

	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
	if len(lines) > errorLeadingLines+errorTrailingLines+1 {
		lines[errorLeadingLines] = fmt.Sprintf("... skipping %d lines ...",
			len(lines)-errorLeadingLines-errorTrailingLines)

		lines = append(lines[:errorLeadingLines+1],
			lines[len(lines)-errorTrailingLines:]...)
	}
	var buf strings.Builder
	for _, line := range lines {
		buf.WriteString("> ")
		buf.WriteString(line)
		buf.WriteString("\n")
	}
	return buf.String()
}

// TODO(b/70370883): This tool uses a lot of open files -- over the default
// soft limit of 1024 on some systems. So bump up to the hard limit until I fix
// the algorithm.
func setMaxFiles(log logger.Logger) {
	var limits syscall.Rlimit

	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limits)
	if err != nil {
		log.Println("Failed to get file limit:", err)
		return
	}

	log.Verbosef("Current file limits: %d soft, %d hard", limits.Cur, limits.Max)
	if limits.Cur == limits.Max {
		return
	}

	limits.Cur = limits.Max
	err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limits)
	if err != nil {
		log.Println("Failed to increase file limit:", err)
	}
}

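// inList reports whether str is equal to any element of list.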
func inList(str string, list []string) bool {
	for _, other := range list {
		if str == other {
			return true
		}
	}
	return false
}

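// copyFile copies the contents of the file at from into a newly created file
// at to, returning any error from opening, creating, or copying.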
func copyFile(from, to string) error {
	fromFile, err := os.Open(from)
	if err != nil {
		return err
	}
	defer fromFile.Close()

	toFile, err := os.Create(to)
	if err != nil {
		return err
	}
	defer toFile.Close()

	_, err = io.Copy(toFile, fromFile)
	return err
}

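// mpContext carries the state shared by all per-product build workers: the
// root build context, logging and tracing sinks, the source finder, the
// top-level config, and the directory that per-product logs are written under.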
type mpContext struct {
	Context context.Context
	Logger  logger.Logger
	Status  status.ToolStatus
	Tracer  tracer.Tracer
	Finder  *finder.Finder
	Config  build.Config

	LogsDir string
}

func main() {
	stdio := terminal.StdioImpl{}

	output := terminal.NewStatusOutput(stdio.Stdout(), "", false,
		build.OsEnvironment().IsEnvTrue("ANDROID_QUIET_BUILD"))

	log := logger.New(output)
	defer log.Cleanup()

	flag.Parse()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	trace := tracer.New(log)
	defer trace.Close()

	stat := &status.Status{}
	defer stat.Finish()
	stat.AddOutput(output)

	var failures failureCount
	stat.AddOutput(&failures)

	build.SetupSignals(log, cancel, func() {
		trace.Close()
		log.Cleanup()
		stat.Finish()
	})

	buildCtx := build.Context{ContextImpl: &build.ContextImpl{
		Context: ctx,
		Logger:  log,
		Tracer:  trace,
		Writer:  output,
		Status:  stat,
	}}

	args := ""
	if *alternateResultDir {
		args = "dist"
	}
	config := build.NewConfig(buildCtx, args)
	if *outDir == "" {
		name := "multiproduct"
		if !*incremental {
			name += "-" + time.Now().Format("20060102150405")
		}

		*outDir = filepath.Join(config.OutDir(), name)

		// Ensure the empty files exist in the output directory
		// containing our output directory too. This is mostly for
		// safety, but also triggers the ninja_build file so that our
		// build servers know that they can parse the output as if it
		// was ninja output.
		build.SetupOutDir(buildCtx, config)

		if err := os.MkdirAll(*outDir, 0777); err != nil {
			log.Fatalf("Failed to create tempdir: %v", err)
		}
	}
	config.Environment().Set("OUT_DIR", *outDir)
	log.Println("Output directory:", *outDir)

	logsDir := filepath.Join(config.OutDir(), "logs")
	os.MkdirAll(logsDir, 0777)

	build.SetupOutDir(buildCtx, config)

	os.MkdirAll(config.LogsDir(), 0777)
	log.SetOutput(filepath.Join(config.LogsDir(), "soong.log"))
	trace.SetOutput(filepath.Join(config.LogsDir(), "build.trace"))

	var jobs = *numJobs
	if jobs < 1 {
		jobs = runtime.NumCPU() / 4

		ramGb := int(config.TotalRAM() / 1024 / 1024 / 1024)
		if ramJobs := ramGb / 25; ramGb > 0 && jobs > ramJobs {
			jobs = ramJobs
		}

		if jobs < 1 {
			jobs = 1
		}
	}
	log.Verbosef("Using %d parallel jobs", jobs)

	setMaxFiles(log)

	finder := build.NewSourceFinder(buildCtx, config)
	defer finder.Shutdown()

	build.FindSources(buildCtx, config, finder)

	vars, err := build.DumpMakeVars(buildCtx, config, nil, []string{"all_named_products"})
	if err != nil {
		log.Fatal(err)
	}
	var productsList []string
	allProducts := strings.Fields(vars["all_named_products"])

	if len(includeProducts) > 0 {
		var missingProducts []string
		for _, product := range includeProducts {
			if inList(product, allProducts) {
				productsList = append(productsList, product)
			} else {
				missingProducts = append(missingProducts, product)
			}
		}
		if len(missingProducts) > 0 {
			log.Fatalf("Products don't exist: %s\n", missingProducts)
		}
	} else {
		productsList = allProducts
	}

	finalProductsList := make([]string, 0, len(productsList))
	skipProduct := func(p string) bool {
		for _, s := range skipProducts {
			if p == s {
				return true
			}
		}
		return false
	}
	for _, product := range productsList {
		if !skipProduct(product) {
			finalProductsList = append(finalProductsList, product)
		} else {
			log.Verbose("Skipping: ", product)
		}
	}

	if *shard < 1 {
		log.Fatalf("--shard value must be >= 1, not %d\n", *shard)
	} else if *shardCount < 1 {
		log.Fatalf("--shard-count value must be >= 1, not %d\n", *shardCount)
	} else if *shard > *shardCount {
		log.Fatalf("--shard (%d) must not be greater than --shard-count (%d)\n", *shard,
			*shardCount)
	} else if *shardCount > 1 {
		finalProductsList = splitList(finalProductsList, *shardCount)[*shard-1]
	}

	log.Verbose("Got product list: ", finalProductsList)

	s := buildCtx.Status.StartTool()
	s.SetTotalActions(len(finalProductsList))

	mpCtx := &mpContext{
		Context: ctx,
		Logger:  log,
		Status:  s,
		Tracer:  trace,

		Finder: finder,
		Config: config,

		LogsDir: logsDir,
	}

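	// Feed every product name into a buffered channel; the producer goroutine
	// closes the channel once all names have been queued.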
	products := make(chan string, len(productsList))
	go func() {
		defer close(products)
		for _, product := range finalProductsList {
			products <- product
		}
	}()

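	// Drain the channel with a pool of worker goroutines. Receiving from the
	// closed channel yields the zero value "", which tells a worker to exit;
	// real product names are never empty.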
	var wg sync.WaitGroup
	for i := 0; i < jobs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case product := <-products:
					if product == "" {
						return
					}
					buildProduct(mpCtx, product)
				}
			}
		}()
	}
	wg.Wait()

	if *alternateResultDir {
		args := zip.ZipArgs{
			FileArgs: []zip.FileArg{
				{GlobDir: logsDir, SourcePrefixToStrip: logsDir},
			},
			OutputFilePath:   filepath.Join(config.RealDistDir(), "logs.zip"),
			NumParallelJobs:  runtime.NumCPU(),
			CompressionLevel: 5,
		}
		if err := zip.Zip(args); err != nil {
			log.Fatalf("Error zipping logs: %v", err)
		}
	}

	s.Finish()

	if failures == 1 {
		log.Fatal("1 failure")
	} else if failures > 1 {
		log.Fatalf("%d failures", failures)
	} else {
		fmt.Fprintln(output, "Success")
	}
}

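// buildProduct builds a single product in its own output directory under the
// top-level OUT_DIR, writing the product's stdout/stderr and soong.log under
// mpctx.LogsDir/<product>. Ninja execution is skipped; with --keep the
// product's artifacts are zipped into <OUT_DIR>/<product>.zip, and the
// per-product output directory is removed afterwards unless --incremental is
// set.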
func buildProduct(mpctx *mpContext, product string) {
	var stdLog string

	outDir := filepath.Join(mpctx.Config.OutDir(), product)
	logsDir := filepath.Join(mpctx.LogsDir, product)

	if err := os.MkdirAll(outDir, 0777); err != nil {
		mpctx.Logger.Fatalf("Error creating out directory: %v", err)
	}
	if err := os.MkdirAll(logsDir, 0777); err != nil {
		mpctx.Logger.Fatalf("Error creating log directory: %v", err)
	}

	stdLog = filepath.Join(logsDir, "std.log")
	f, err := os.Create(stdLog)
	if err != nil {
		mpctx.Logger.Fatalf("Error creating std.log: %v", err)
	}
	defer f.Close()

	log := logger.New(f)
	defer log.Cleanup()
	log.SetOutput(filepath.Join(logsDir, "soong.log"))

	action := &status.Action{
		Description: product,
		Outputs:     []string{product},
	}
	mpctx.Status.StartAction(action)
	defer logger.Recover(func(err error) {
		mpctx.Status.FinishAction(status.ActionResult{
			Action: action,
			Error:  err,
			Output: errMsgFromLog(stdLog),
		})
	})

	ctx := build.Context{ContextImpl: &build.ContextImpl{
		Context: mpctx.Context,
		Logger:  log,
		Tracer:  mpctx.Tracer,
		Writer:  f,
		Thread:  mpctx.Tracer.NewThread(product),
		Status:  &status.Status{},
	}}
	ctx.Status.AddOutput(terminal.NewStatusOutput(ctx.Writer, "", false,
		build.OsEnvironment().IsEnvTrue("ANDROID_QUIET_BUILD")))

	args := append([]string(nil), flag.Args()...)
	args = append(args, "--skip-soong-tests")
	config := build.NewConfig(ctx, args...)
	config.Environment().Set("OUT_DIR", outDir)
	if !*keepArtifacts {
		config.SetEmptyNinjaFile(true)
	}
	build.FindSources(ctx, config, mpctx.Finder)
	config.Lunch(ctx, product, *buildVariant)

	defer func() {
		if *keepArtifacts {
			args := zip.ZipArgs{
				FileArgs: []zip.FileArg{
					{
						GlobDir:             outDir,
						SourcePrefixToStrip: outDir,
					},
				},
				OutputFilePath:   filepath.Join(mpctx.Config.OutDir(), product+".zip"),
				NumParallelJobs:  runtime.NumCPU(),
				CompressionLevel: 5,
			}
			if err := zip.Zip(args); err != nil {
				log.Fatalf("Error zipping artifacts: %v", err)
			}
		}
		if !*incremental {
			os.RemoveAll(outDir)
		}
	}()

	config.SetSkipNinja(true)

	buildWhat := build.RunProductConfig
	if !*onlyConfig {
		buildWhat |= build.RunSoong
		if !*onlySoong {
			buildWhat |= build.RunKati
		}
	}

	before := time.Now()
	build.Build(ctx, config)

	// Save std_full.log if Kati re-read the makefiles
	if buildWhat&build.RunKati != 0 {
		if after, err := os.Stat(config.KatiBuildNinjaFile()); err == nil && after.ModTime().After(before) {
			err := copyFile(stdLog, filepath.Join(filepath.Dir(stdLog), "std_full.log"))
			if err != nil {
				log.Fatalf("Error copying log file: %s", err)
			}
		}
	}

	mpctx.Status.FinishAction(status.ActionResult{
		Action: action,
	})
}

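// failureCount counts failed actions and error-level messages. It is
// registered as an output on the top-level status.Status; its Write method
// discards raw build output.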
type failureCount int

func (f *failureCount) StartAction(action *status.Action, counts status.Counts) {}

func (f *failureCount) FinishAction(result status.ActionResult, counts status.Counts) {
	if result.Error != nil {
		*f += 1
	}
}

func (f *failureCount) Message(level status.MsgLevel, message string) {
	if level >= status.ErrorLvl {
		*f += 1
	}
}

func (f *failureCount) Flush() {}

func (f *failureCount) Write(p []byte) (int, error) {
	// discard writes
	return len(p), nil
}

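// splitList partitions list into shardCount contiguous sub-slices whose
// lengths differ by at most one, distributing the remainder to the earliest
// shards.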
func splitList(list []string, shardCount int) (ret [][]string) {
	each := len(list) / shardCount
	extra := len(list) % shardCount
	for i := 0; i < shardCount; i++ {
		count := each
		if extra > 0 {
			count += 1
			extra -= 1
		}
		ret = append(ret, list[:count])
		list = list[count:]
	}
	return
}
528