From 66bf40af561476efd3123a4d0bb9f186c4b220e8 Mon Sep 17 00:00:00 2001
From: Chongyun Lee <licy183@termux.dev>
Date: Wed, 16 Apr 2025 01:01:04 +0800
Subject: [PATCH] reland jumbo scripts

---
 build/config/jumbo.gni          | 287 ++++++++++++++++++++++++++++++++
 build/config/merge_for_jumbo.py | 145 ++++++++++++++++
 2 files changed, 432 insertions(+)
 create mode 100644 build/config/jumbo.gni
 create mode 100644 build/config/merge_for_jumbo.py

diff --git a/build/config/jumbo.gni b/build/config/jumbo.gni
new file mode 100644
index 0000000000..3d07646aee
--- /dev/null
+++ b/build/config/jumbo.gni
@@ -0,0 +1,287 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/nacl/config.gni")  # To see if jumbo should be turned off
+
+declare_args() {
+  # If true, use a jumbo build (files compiled together) to speed up
+  # compilation.
+  use_jumbo_build = false
+
+  # A list of build targets to exclude from jumbo builds, for optimal
+  # round trip time when frequently changing a set of cpp files. The
+  # targets can be just the short name (in which case it matches any
+  # target with that name), a directory prefixed with the root
+  # specifier //, or a full build target label.
+  #
+  # Example:
+  # These would all exclude the "browser" target in a file
+  # content/browser/BUILD.gn, and potentially more.
+  #
+  # jumbo_build_excluded = [ "browser" ]
+  # jumbo_build_excluded = [ "//content/browser" ]
+  # jumbo_build_excluded = [ "//content/browser:browser" ]
+  jumbo_build_excluded = []
+
+  # How many files to group on average. Smaller numbers give more
+  # parallelism, higher numbers give less total CPU usage. Higher
+  # numbers also give longer single-file recompilation times.
+  #
+  # Recommendations:
+  # Numbers higher than 100 do not reduce wall clock compile times
+  # even for 4 cores or less, so there is no reason to go higher than 100.
+  # Going from 50 to 100 with a 4 core CPU saves about 3% CPU time and
+  # 3% wall clock time in a tree with blink, v8 and content
+  # jumbofied. At the same time it increases the compile time for the
+  # largest jumbo chunks by 10-20% and reduces the chance to use all
+  # available CPU cores. So set the default to 50 to balance between
+  # high and low-core build performance. -1 means use the default.
+  jumbo_file_merge_limit = -1
+}
+
+# Normal builds benefit from lots of jumbification
+jumbo_file_merge_default = 50
+
+# Use one of the targets jumbo_source_set, jumbo_static_library, or
+# jumbo_component to generate a target which merges sources if possible to
+# compile much faster.
+#
+# Special values.
+#
+#   target_type
+#      The kind of target to build. For example the string
+#      "static_library".
+#
+#   always_build_jumbo
+#      If set and set to true, then use jumbo compile even when it is
+#      globally disabled. Otherwise it has no effect.
+#
+#   never_build_jumbo
+#      If set and set to true, then do not jumbo compile even if it is
+#      globally enabled. Otherwise it has no effect.
+#
+#   jumbo_excluded_sources
+#      If set to a list of files, those files will not be merged with
+#      the rest. This can be necessary if merging the files causes
+#      compilation issues and fixing the issues is impractical.
+template("internal_jumbo_target") {
+  use_jumbo_build_for_target = use_jumbo_build
+  if (defined(invoker.always_build_jumbo) && invoker.always_build_jumbo) {
+    use_jumbo_build_for_target = true
+  }
+  if (defined(invoker.never_build_jumbo) && invoker.never_build_jumbo) {
+    use_jumbo_build_for_target = false
+  }
+  if (is_nacl_irt) {
+    # The code is barely compatible with the nacl toolchain anymore and we
+    # don't want to stress it further with jumbo compilation units.
+    use_jumbo_build_for_target = false
+  }
+
+  foreach(excluded_target, jumbo_build_excluded) {
+    if (excluded_target == target_name ||
+        excluded_target == get_label_info(":" + target_name, "dir") ||
+        excluded_target ==
+        get_label_info(":" + target_name, "label_no_toolchain")) {
+      use_jumbo_build_for_target = false
+    }
+  }
+
+  excluded_sources = []
+  if (defined(invoker.jumbo_excluded_sources)) {
+    excluded_sources = invoker.jumbo_excluded_sources
+  }
+
+  if (defined(invoker.sources)) {
+    invoker_sources = invoker.sources
+  } else {
+    invoker_sources = []
+  }
+
+  gen_target_dir = invoker.target_gen_dir
+
+  not_needed([ "gen_target_dir" ])  # Prevent "unused variable".
+
+  if (use_jumbo_build_for_target) {
+    jumbo_files = []
+
+    # Split the sources list into chunks that are not excessively large
+    current_file_index = 0
+    next_chunk_start = 0
+    next_chunk_number = 1
+    merge_limit = jumbo_file_merge_limit
+    if (merge_limit == -1) {
+      merge_limit = jumbo_file_merge_default
+    }
+    has_c_file = false
+    has_objective_c_file = false
+    sources_in_jumbo_files = []
+    assert(merge_limit > 0)
+    foreach(source_file, invoker_sources) {
+      source_ext = get_path_info(source_file, "extension")
+      is_source_file = true
+      if (source_ext == "c") {
+        has_c_file = true
+      } else if (source_ext == "mm") {
+        has_objective_c_file = true
+      } else if (source_ext == "cc" || source_ext == "cpp") {
+        if (current_file_index == next_chunk_start) {
+          jumbo_files += [ "$gen_target_dir/" + target_name + "_jumbo_" +
+                           next_chunk_number + ".cc" ]
+          next_chunk_number += 1
+          next_chunk_start += merge_limit
+        }
+        current_file_index += 1
+      } else {
+        is_source_file = false
+      }
+      if (is_source_file) {
+        sources_in_jumbo_files += [ source_file ]
+      }
+    }
+
+    if (jumbo_files == [] || current_file_index == 1) {
+      # Empty sources list or a sources list with only header files or
+      # at most one non-header file.
+      use_jumbo_build_for_target = false
+      not_needed([
+                   "sources_in_jumbo_files",
+                   "current_file_index",
+                   "next_chunk_start",
+                   "next_chunk_number",
+                 ])
+    }
+
+    if (has_c_file) {
+      jumbo_files += [ "$gen_target_dir/" + target_name + "_jumbo_c.c" ]
+    }
+    if (has_objective_c_file) {
+      jumbo_files += [ "$gen_target_dir/" + target_name + "_jumbo_mm.mm" ]
+    }
+  }
+
+  if (use_jumbo_build_for_target) {
+    merge_action_name = target_name + "__jumbo_merge"
+    sources_in_jumbo_files -= excluded_sources
+
+    # Create an action that calls a script that merges all the source files.
+    action(merge_action_name) {
+      script = "//build/config/merge_for_jumbo.py"
+      response_file_contents =
+          rebase_path(sources_in_jumbo_files, root_build_dir)
+      outputs = jumbo_files
+      args = [ "--outputs" ] + rebase_path(outputs, root_build_dir) +
+             [ "--file-list={{response_file_name}}" ]
+
+      # For the "gn analyze" step to work, gn needs to know about the
+      # original source files. They can't be in |sources| because then
+      # they will be compiled, so they have to be somewhere else where
+      # gn analyze looks. One alternative is the |data| list but that
+      # will affect test packaging with known bad effects on
+      # distributed testing. Putting them in this action's input list
+      # is the least bad place.
+      inputs = []
+      foreach(f, invoker_sources - excluded_sources) {
+        # Avoid generated files and non-source files.
+        in_source_tree = string_replace(rebase_path(f),
+                                        rebase_path(root_out_dir),
+                                        "dummy") == rebase_path(f)
+        is_source_file = get_path_info(f, "extension") == "cc" ||
+                         get_path_info(f, "extension") == "cpp" ||
+                         get_path_info(f, "extension") == "c" ||
+                         get_path_info(f, "extension") == "mm"
+        if (in_source_tree && is_source_file) {
+          inputs += [ f ]
+        }
+      }
+    }
+  } else {
+    # If the list subtraction triggers a gn error,
+    # jumbo_excluded_sources lists a file that is not in sources.
+    sources_after_exclusion = invoker_sources - excluded_sources
+    not_needed([ "sources_after_exclusion" ])
+  }
+
+  target_type = invoker.target_type
+
+  # Perform the actual operation, either on the original sources or
+  # the sources post-jumbo merging.
+  target(target_type, target_name) {
+    deps = []
+    if (defined(invoker.deps)) {
+      deps += invoker.deps
+    }
+
+    # Take everything else not handled above from the invoker.
+    variables_to_not_forward = [ "deps" ]
+    if (use_jumbo_build_for_target) {
+      deps += [ ":" + merge_action_name ]
+      variables_to_not_forward += [ "sources" ]
+      assert(jumbo_files != [])
+      sources = invoker_sources - sources_in_jumbo_files + jumbo_files
+
+      # Change include_dirs to make sure that the jumbo file can find its
+      # #included files.
+      variables_to_not_forward += [ "include_dirs" ]
+      include_dirs = []
+      if (defined(invoker.include_dirs)) {
+        include_dirs = invoker.include_dirs
+      }
+      include_dirs += [ root_build_dir ]
+    }
+    forward_variables_from(invoker, "*", variables_to_not_forward)
+  }
+}
+
+# See documentation above by "internal_jumbo_target".
+template("jumbo_source_set") {
+  internal_jumbo_target(target_name) {
+    target_type = "source_set"
+    forward_variables_from(invoker, "*")
+  }
+}
+
+set_defaults("jumbo_source_set") {
+  # This sets the default list of configs when the jumbo_source_set target
+  # is defined. The default_compiler_configs comes from BUILDCONFIG.gn and
+  # is the list normally applied to static libraries and source sets.
+  configs = default_compiler_configs
+}
+
+# See documentation above by "internal_jumbo_target".
+template("jumbo_static_library") {
+  internal_jumbo_target(target_name) {
+    target_type = "static_library"
+    forward_variables_from(invoker, "*")
+  }
+}
+
+set_defaults("jumbo_static_library") {
+  # This sets the default list of configs when the jumbo_static_library target
+  # is defined. The default_compiler_configs comes from BUILDCONFIG.gn and
+  # is the list normally applied to static libraries and source sets.
+  configs = default_compiler_configs
+}
+
+# See documentation above by "internal_jumbo_target".
+template("jumbo_component") {
+  internal_jumbo_target(target_name) {
+    target_type = "component"
+    forward_variables_from(invoker, "*")
+  }
+}
+
+set_defaults("jumbo_component") {
+  # This sets the default list of configs when the jumbo_component
+  # target is defined. This code is a clone of set_defaults for the
+  # ordinary "component" template.
+  if (is_component_build) {
+    configs = default_shared_library_configs
+    if (is_android) {
+      configs -= [ "//build/config/android:hide_all_but_jni_onload" ]
+    }
+  } else {
+    configs = default_compiler_configs
+  }
+}
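
For orientation, here is a minimal usage sketch (not part of the patch itself) of the templates added above. The target, directory and file names are invented; only the import path, the jumbo_source_set template, the jumbo_excluded_sources argument and the use_jumbo_build / jumbo_file_merge_limit gn args come from jumbo.gni:

  # some/dir/BUILD.gn (hypothetical target, illustration only)
  import("//build/config/jumbo.gni")

  jumbo_source_set("example") {
    sources = [
      "a.cc",
      "b.cc",
      "c.cc",
    ]

    # Files that break when merged can be kept out of the jumbo chunks.
    jumbo_excluded_sources = [ "c.cc" ]
  }

  # args.gn (hypothetical)
  use_jumbo_build = true
  jumbo_file_merge_limit = 50  # Optional; -1 (the default) falls back to jumbo_file_merge_default.

With use_jumbo_build left at false, the templates simply forward everything to a plain source_set / static_library / component target, so switching a target over only means changing the template name.
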
diff --git a/build/config/merge_for_jumbo.py b/build/config/merge_for_jumbo.py
new file mode 100644
index 0000000000..6d037a80eb
--- /dev/null
+++ b/build/config/merge_for_jumbo.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This script creates a "jumbo" file which merges all incoming files
+for compiling.
+
+"""
+
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import argparse
+import hashlib
+import io
+import os
+
+def cut_ranges(boundaries):
+  # Given an increasing sequence of boundary indices, generate a sequence of
+  # non-overlapping ranges. The total range is inclusive of the first index
+  # and exclusive of the last index from the given sequence.
+  for start, stop in zip(boundaries, boundaries[1:]):
+    yield range(start, stop)
+
+
+def generate_chunk_stops(inputs, output_count, smart_merge=True):
+  # Note: In the comments below, unique numeric labels are assigned to files.
+  #       Consider them as the sorted rank of the hash of each file path.
+  # Simple jumbo chunking generates uniformly sized chunks with the ceiling of:
+  # (output_index + 1) * input_count / output_count
+  input_count = len(inputs)
+  stops = [((i + 1) * input_count + output_count - 1) // output_count
+           for i in range(output_count)]
+  # This is disruptive at times because file insertions and removals can
+  # invalidate many chunks as all files are offset by one.
+  # For example, say we have 12 files in 4 uniformly sized chunks:
+  # 9, 4, 0; 7,  1, 11;  5, 10, 2; 6, 3, 8
+  # If we delete the first file we get:
+  # 4, 0, 7; 1, 11,  5; 10,  2, 6; 3, 8
+  # All of the chunks have new sets of inputs.
+
+  # With path-aware chunking, we start with the uniformly sized chunks:
+  # 9, 4, 0; 7,  1, 11;  5, 10, 2; 6, 3, 8
+  # First we find the smallest rank in each of the chunks. Their indices are
+  # stored in the |centers| list and in this example the ranks would be:
+  # 0, 1, 2, 3
+  # Then we find the largest rank between the centers. Their indices are stored
+  # in the |stops| list and in this example the ranks would be:
+  # 7, 11, 6
+  # These files mark the boundaries between chunks and these boundary files are
+  # often maintained even as files are added or deleted.
+  # In this example, 7, 11, and 6 are the first files in each chunk:
+  # 9, 4, 0; 7,  1; 11,  5, 10, 2; 6, 3, 8
+  # If we delete the first file and repeat the process we get:
+  # 4, 0; 7, 1; 11,  5, 10,  2; 6, 3, 8
+  # Only the first chunk has a new set of inputs.
+  if smart_merge:
+    # Starting with the simple chunks, every file is assigned a rank.
+    # This requires a hash function that is stable across runs.
+    hasher = lambda n: hashlib.md5(inputs[n].encode()).hexdigest()
+    # In each chunk there is a key file with lowest rank; mark them.
+    # Note that they will not easily change.
+    centers = [min(indices, key=hasher) for indices in cut_ranges([0] + stops)]
+    # Between each pair of key files there is a file with highest rank.
+    # Mark these to be used as border files. They also will not easily change.
+    # Forget the initial chunks and create new chunks by splitting the list at
+    # every border file.
+    stops = [max(indices, key=hasher) for indices in cut_ranges(centers)]
+    stops.append(input_count)
+  return stops
+
+
+def write_jumbo_files(inputs, outputs, written_input_set, written_output_set):
+  chunk_stops = generate_chunk_stops(inputs, len(outputs))
+
+  written_inputs = 0
+  for output_index, output_file in enumerate(outputs):
+    written_output_set.add(output_file)
+    if os.path.isfile(output_file):
+      with open(output_file, "r") as current:
+        current_jumbo_file = current.read()
+    else:
+      current_jumbo_file = None
+
+    out = io.StringIO()
+    out.write("/* This is a Jumbo file. Don't edit. */\n\n")
+    out.write("/* Generated with merge_for_jumbo.py. */\n\n")
+    input_limit = chunk_stops[output_index]
+    while written_inputs < input_limit:
+      filename = inputs[written_inputs]
+      written_inputs += 1
+      out.write("#include \"%s\"\n" % filename)
+      written_input_set.add(filename)
+    new_jumbo_file = out.getvalue()
+    out.close()
+
+    if new_jumbo_file != current_jumbo_file:
+      with open(output_file, "w") as out:
+        out.write(new_jumbo_file)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument("--outputs", nargs="+", required=True,
+                      help='List of output files to split input into')
+  parser.add_argument("--file-list", required=True)
+  parser.add_argument("--verbose", action="store_true")
+  args = parser.parse_args()
+
+  lines = []
+  # If written with gn |write_file| each file is on its own line.
+  with open(args.file_list) as file_list_file:
+    lines = [line.strip() for line in file_list_file if line.strip()]
+  # If written with gn |response_file_contents| the files are space separated.
+  all_inputs = []
+  for line in lines:
+    all_inputs.extend(line.split())
+
+  written_output_set = set()  # Just for double checking
+  written_input_set = set()  # Just for double checking
+  for language_ext in (".cc", ".c", ".mm",):
+    if language_ext == ".cc":
+      ext_pattern = (".cc", ".cpp")
+    else:
+      ext_pattern = tuple([language_ext])
+
+    outputs = [x for x in args.outputs if x.endswith(ext_pattern)]
+    inputs = [x for x in all_inputs if x.endswith(ext_pattern)]
+
+    if not outputs:
+      assert not inputs
+      continue
+
+    write_jumbo_files(inputs, outputs, written_input_set, written_output_set)
+
+  assert set(args.outputs) == written_output_set, "Did not fill all outputs"
+  assert set(all_inputs) == written_input_set, "Did not use all inputs"
+  if args.verbose:
+    print("Generated %s (%d files) based on %s" % (
+      str(args.outputs), len(written_input_set), args.file_list))
+
+if __name__ == "__main__":
+  main()
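
As a sanity check on the chunking logic (again, not part of the patch), the stability property described in the generate_chunk_stops() comments can be observed directly. The sketch below assumes the patch has been applied and that build/config is on the Python path; the file names are made up:

  # chunk_demo.py (hypothetical helper, illustration only)
  from merge_for_jumbo import generate_chunk_stops

  def chunks(paths, output_count):
    # Group paths the same way write_jumbo_files() consumes them:
    # output i receives paths[previous_stop:stops[i]].
    stops = generate_chunk_stops(paths, output_count)
    start = 0
    groups = []
    for stop in stops:
      groups.append(paths[start:stop])
      start = stop
    return groups

  files = ["src/file_%02d.cc" % n for n in range(12)]

  # Remove the first file, as in the example in the comments, and compare.
  for group in chunks(files, 4):
    print(group)
  print("---")
  for group in chunks(files[1:], 4):
    print(group)

Because the chunk borders are picked by hashing file paths rather than by position, most groups typically keep the same membership after the deletion, and write_jumbo_files() only rewrites the jumbo files whose contents actually changed. This is the same grouping the gn action drives in practice via python merge_for_jumbo.py --outputs <generated .cc/.c/.mm files> --file-list <response file> (optionally --verbose).
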