From 2d8121323286f062413f3dce1540665b95740d45 Mon Sep 17 00:00:00 2001
From: Chongyun Lee <licy183@termux.dev>
Date: Thu, 17 Apr 2025 03:03:03 +0800
Subject: [PATCH] reland jumbo 4
Enable jumbo build for the following template(s):
- //v8/gni/v8.gni -> template("v8_source_set")
- //v8/gni/v8.gni -> template("v8_header_set")
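A minimal usage sketch (illustrative only; the target and file names are
hypothetical): with jumbo enabled, a target's sources are merged into
combined translation units, and `jumbo_excluded_sources` keeps clash-prone
files compiled on their own.

    import("//build/config/jumbo.gni")

    v8_source_set("example") {
      sources = [
        "src/a.cc",        # merged with b.cc into one jumbo unit
        "src/b.cc",
        "src/clashing.cc",
      ]
      # Files with macro or `using namespace` clashes stay standalone.
      jumbo_excluded_sources = [ "src/clashing.cc" ]
    }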
---
v8/BUILD.gn | 104 ++++++++++++++++++
v8/gni/v8.gni | 7 +-
.../baseline/arm/baseline-compiler-arm-inl.h | 2 +
v8/src/baseline/baseline-compiler.cc | 2 +
v8/src/compiler/backend/instruction.h | 4 +-
.../turboshaft/loop-unrolling-reducer.cc | 6 +-
.../wasm-in-js-inlining-reducer-inl.h | 2 +
.../wasm-load-elimination-reducer.h | 1 +
v8/src/compiler/wasm-compiler.cc | 6 +-
v8/src/diagnostics/perf-jit.cc | 2 +
v8/src/handles/global-handles.cc | 4 +-
v8/src/heap/code-range.cc | 14 +--
v8/src/heap/cppgc/concurrent-marker.cc | 4 +
v8/src/heap/cppgc/marker.cc | 4 +
v8/src/heap/minor-mark-sweep.cc | 6 +-
v8/src/inspector/BUILD.gn | 1 +
v8/src/inspector/value-mirror.cc | 10 +-
.../default-thread-isolated-allocator.cc | 2 +
v8/src/maglev/arm/maglev-ir-arm.cc | 2 +
v8/src/maglev/maglev-assembler-inl.h | 4 +-
v8/src/maglev/maglev-assembler.cc | 2 +
v8/src/maglev/maglev-code-generator.cc | 4 +-
v8/src/maglev/maglev-ir.cc | 2 +
v8/src/objects/js-atomics-synchronization.h | 8 +-
v8/src/objects/js-collator.cc | 4 +
v8/src/objects/js-duration-format.cc | 4 +-
v8/src/objects/js-list-format.cc | 4 +-
v8/src/objects/js-plural-rules.cc | 4 +
.../experimental/experimental-compiler.cc | 4 +-
v8/src/regexp/regexp-compiler-tonode.cc | 16 +--
v8/src/sandbox/testing.cc | 1 +
v8/src/snapshot/read-only-deserializer.cc | 4 +-
v8/src/torque/csa-generator.cc | 8 +-
v8/src/utils/sha-256.cc | 3 +
v8/src/wasm/wasm-external-refs.cc | 3 +
v8/src/wasm/wrappers.cc | 2 +
36 files changed, 204 insertions(+), 56 deletions(-)
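Most of the source-side hunks below apply one of two jumbo workarounds,
sketched here (an illustrative file, not V8 code): rename a file-local
symbol through a #define that is #undef'ed at the end of the file, or
#undef convenience macros (TRACE, __, EMIT_OP, ...) so they cannot leak
into the next file concatenated into the same translation unit.

    // Workaround 1: make a file-local constant unique per file.
    #define kLocalLimit kLocalLimit_ThisFile
    static constexpr int kLocalLimit = 42;  // defines kLocalLimit_ThisFile

    int Limit() { return kLocalLimit; }

    #undef kLocalLimit  // the next file in the jumbo unit may redefine it

    // Workaround 2: scope a convenience macro to this file only.
    #define TRACE(msg) (void)(msg)
    const char* Work() { TRACE("work"); return "done"; }
    #undef TRACE  // must not leak into files merged after this one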
diff --git a/v8/BUILD.gn b/v8/BUILD.gn
index 9a2b2cdd94..dce8b3f21a 100644
--- a/v8/BUILD.gn
+++ b/v8/BUILD.gn
@@ -7,6 +7,7 @@ import("//build/config/arm.gni")
import("//build/config/coverage/coverage.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/host_byteorder.gni")
+import("//build/config/jumbo.gni")
import("//build/config/mips.gni")
import("//build/config/riscv.gni")
import("//build/config/rust.gni")
@@ -3055,26 +3056,54 @@ v8_source_set("v8_initializers") {
}
}
+ jumbo_excluded_sources = [
+ # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
+ "src/builtins/builtins-async-iterator-gen.cc",
+ "src/builtins/builtins-async-generator-gen.cc",
+
+ # These source files take an unusually large amount of time to
+ # compile. Build them separately to avoid bottlenecks.
+ "src/builtins/builtins-regexp-gen.cc",
+ "src/codegen/code-stub-assembler.cc",
+
+ # FIXME: Too many errors
+ "src/ic/binary-op-assembler.cc",
+ "src/ic/unary-op-assembler.cc",
+ "src/interpreter/interpreter-generator.cc",
+ ]
+
if (v8_current_cpu == "x86") {
sources += [
### gcmole(ia32) ###
"src/builtins/ia32/builtins-ia32.cc",
]
+ jumbo_excluded_sources += [
+ "src/builtins/ia32/builtins-ia32.cc",
+ ]
} else if (v8_current_cpu == "x64") {
sources += [
### gcmole(x64) ###
"src/builtins/x64/builtins-x64.cc",
]
+ jumbo_excluded_sources += [
+ "src/builtins/x64/builtins-x64.cc",
+ ]
} else if (v8_current_cpu == "arm") {
sources += [
### gcmole(arm) ###
"src/builtins/arm/builtins-arm.cc",
]
+ jumbo_excluded_sources += [
+ "src/builtins/arm/builtins-arm.cc",
+ ]
} else if (v8_current_cpu == "arm64") {
sources += [
### gcmole(arm64) ###
"src/builtins/arm64/builtins-arm64.cc",
]
+ jumbo_excluded_sources += [
+ "src/builtins/arm64/builtins-arm64.cc",
+ ]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
### gcmole(mips64el) ###
@@ -5105,6 +5134,19 @@ v8_compiler_sources = [
"src/compiler/zone-stats.cc",
]
+v8_compiler_sources_jumbo_excluded = [
+ "src/compiler/bytecode-analysis.cc",
+
+ # `using namespace compiler::turboshaft` makes `Type` ambiguous
+ "src/compiler/backend/instruction-selector.cc",
+
+ # FIXME:
+ # no type named 'kTurboshaft' in 'v8::internal::compiler::PhaseKind'
+ # no type named 'kThreadSpecific' in 'v8::internal::RuntimeCallStats'
+ "src/compiler/turboshaft/csa-optimize-phase.cc",
+ "src/compiler/turboshaft/pipelines.cc",
+]
+
if (!v8_enable_maglev) {
# When Maglev is not enabled, Turboshaft still needs Maglev's graph builder.
v8_compiler_sources += [
@@ -5125,6 +5167,11 @@ if (v8_current_cpu == "x86") {
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
]
+ v8_compiler_sources_jumbo_excluded += [
+ # `using namespace turboshaft` makes `Type` ambiguous
+ "src/compiler/backend/ia32/instruction-selector-ia32.cc",
+ ]
+
} else if (v8_current_cpu == "x64") {
v8_compiler_sources += [
### gcmole(x64) ###
@@ -5133,6 +5180,9 @@ if (v8_current_cpu == "x86") {
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
]
+ v8_compiler_sources_jumbo_excluded += [
+ "src/compiler/backend/x64/instruction-selector-x64.cc", # Ditto
+ ]
} else if (v8_current_cpu == "arm") {
v8_compiler_sources += [
### gcmole(arm) ###
@@ -5141,6 +5191,9 @@ if (v8_current_cpu == "x86") {
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
]
+ v8_compiler_sources_jumbo_excluded += [
+ "src/compiler/backend/arm/instruction-selector-arm.cc", # Ditto
+ ]
} else if (v8_current_cpu == "arm64") {
v8_compiler_sources += [
### gcmole(arm64) ###
@@ -5149,6 +5202,9 @@ if (v8_current_cpu == "x86") {
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
]
+ v8_compiler_sources_jumbo_excluded += [
+ "src/compiler/backend/arm64/instruction-selector-arm64.cc", # Ditto
+ ]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
v8_compiler_sources += [
### gcmole(mips64el) ###
@@ -5240,6 +5296,7 @@ v8_source_set("v8_compiler_for_mksnapshot_source_set") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = v8_compiler_sources
+ jumbo_excluded_sources = v8_compiler_sources_jumbo_excluded
public_deps = [
":generate_bytecode_builtins_list",
@@ -5274,6 +5331,7 @@ v8_source_set("v8_compiler") {
if (v8_enable_turbofan) {
sources = v8_compiler_sources
+ jumbo_excluded_sources = v8_compiler_sources_jumbo_excluded
} else {
# With Turbofan disabled, we only include the stubbed-out API.
sources = [ "src/compiler/turbofan-disabled.cc" ]
@@ -5822,6 +5880,8 @@ v8_source_set("v8_base_without_compiler") {
]
}
+ jumbo_excluded_sources = []
+
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-assembler.cc",
@@ -5849,6 +5909,10 @@ v8_source_set("v8_base_without_compiler") {
"src/maglev/arm64/maglev-assembler-arm64.cc",
"src/maglev/arm64/maglev-ir-arm64.cc",
]
+ jumbo_excluded_sources += [
+ "src/maglev/arm64/maglev-assembler-arm64.cc",
+ "src/maglev/arm64/maglev-ir-arm64.cc",
+ ]
} else if (v8_current_cpu == "riscv64") {
sources += [
"src/maglev/riscv/maglev-assembler-riscv.cc",
@@ -5859,6 +5923,10 @@ v8_source_set("v8_base_without_compiler") {
"src/maglev/x64/maglev-assembler-x64.cc",
"src/maglev/x64/maglev-ir-x64.cc",
]
+ jumbo_excluded_sources += [
+ "src/maglev/x64/maglev-assembler-x64.cc",
+ "src/maglev/x64/maglev-ir-x64.cc",
+ ]
} else if (v8_current_cpu == "s390x") {
sources += [
"src/maglev/s390/maglev-assembler-s390.cc",
@@ -5873,6 +5941,13 @@ v8_source_set("v8_base_without_compiler") {
"src/tracing/perfetto-logger.cc",
"src/tracing/perfetto-utils.cc",
]
+ jumbo_excluded_sources += [
+ "src/asmjs/asm-js.cc",
+ "src/asmjs/asm-parser.cc",
+ "src/asmjs/asm-scanner.cc",
+ "src/wasm/turboshaft-graph-interface.cc",
+ "src/wasm/wasm-module.cc",
+ ]
}
if (v8_enable_webassembly) {
@@ -5932,6 +6007,10 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/well-known-imports.cc",
"src/wasm/wrappers.cc",
]
+ jumbo_excluded_sources += [
+ # Too many macro conflicts
+ "src/wasm/baseline/liftoff-compiler.cc",
+ ]
if (v8_wasm_random_fuzzers) {
sources += [
### gcmole(all) ###
@@ -5966,6 +6045,26 @@ v8_source_set("v8_base_without_compiler") {
sources += [ "src/heap/reference-summarizer.cc" ]
}
+ jumbo_excluded_sources += [
+ "src/execution/clobber-registers.cc", # Host asm vs target asm includes
+ # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
+ "src/profiler/heap-snapshot-generator.cc", # Macro clash in mman-linux.h
+ "src/heap/local-heap.cc",
+ "src/heap/safepoint.cc",
+ "src/objects/js-display-names.cc",
+ "src/objects/js-relative-time-format.cc",
+ "src/objects/js-temporal-objects.cc",
+ "src/utils/ostreams.cc",
+
+ # These source files take an unusually large amount of time to
+ # compile. Build them separately to avoid bottlenecks.
+ "src/api/api.cc",
+ "src/heap/heap.cc",
+ "src/objects/elements.cc",
+ "src/objects/objects.cc",
+ "src/parsing/parser.cc",
+ ]
+
if (v8_current_cpu == "x86") {
sources += [
### gcmole(ia32) ###
@@ -6073,6 +6172,11 @@ v8_source_set("v8_base_without_compiler") {
if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.cc" ]
}
+ jumbo_excluded_sources += [
+ # TODO(mostynb@vewd.com): fix this code so it doesn't need
+ # to be excluded, see the comments inside.
+ "src/codegen/arm64/instructions-arm64-constants.cc",
+ ]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
### gcmole(mips64el) ###
diff --git a/v8/gni/v8.gni b/v8/gni/v8.gni
index bf265b293d..775fa7dd03 100644
--- a/v8/gni/v8.gni
+++ b/v8/gni/v8.gni
@@ -4,6 +4,7 @@
import("//build/config/chrome_build.gni")
import("//build/config/compiler/pgo/pgo.gni")
+import("//build/config/jumbo.gni")
import("//build/config/gclient_args.gni")
import("//build/config/ios/config.gni")
import("//build/config/ios/ios_sdk_overrides.gni")
@@ -330,9 +331,9 @@ template("v8_source_set") {
defined(v8_static_library) && v8_static_library && is_win) {
link_target_type = "split_static_library"
} else if (defined(v8_static_library) && v8_static_library) {
- link_target_type = "static_library"
+ link_target_type = "jumbo_static_library"
} else {
- link_target_type = "source_set"
+ link_target_type = "jumbo_source_set"
}
target(link_target_type, target_name) {
forward_variables_from(invoker,
@@ -351,7 +352,7 @@ template("v8_source_set") {
}
template("v8_header_set") {
- source_set(target_name) {
+ jumbo_source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs -= v8_remove_configs
configs += v8_add_configs
diff --git a/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/v8/src/baseline/arm/baseline-compiler-arm-inl.h
index 6d99dbe529..d6805a0a05 100644
--- a/v8/src/baseline/arm/baseline-compiler-arm-inl.h
+++ b/v8/src/baseline/arm/baseline-compiler-arm-inl.h
@@ -95,6 +95,8 @@ void BaselineCompiler::VerifyFrameSize() {
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
}
+#undef __
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/v8/src/baseline/baseline-compiler.cc b/v8/src/baseline/baseline-compiler.cc
index eaec6ca8a4..7e846bb671 100644
--- a/v8/src/baseline/baseline-compiler.cc
+++ b/v8/src/baseline/baseline-compiler.cc
@@ -2474,6 +2474,8 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
#undef RCS_BASELINE_SCOPE
#undef __
+#undef __
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/v8/src/compiler/backend/instruction.h b/v8/src/compiler/backend/instruction.h
index 99d9369bc3..28219c297e 100644
--- a/v8/src/compiler/backend/instruction.h
+++ b/v8/src/compiler/backend/instruction.h
@@ -520,9 +520,9 @@ class LocationOperand : public InstructionOperand {
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
- Register GetRegister() const {
+ v8::internal::Register GetRegister() const {
DCHECK(IsRegister());
- return Register::from_code(register_code());
+ return v8::internal::Register::from_code(register_code());
}
FloatRegister GetFloatRegister() const {
diff --git a/v8/src/compiler/turboshaft/loop-unrolling-reducer.cc b/v8/src/compiler/turboshaft/loop-unrolling-reducer.cc
index cc5b406f1b..9fac217286 100644
--- a/v8/src/compiler/turboshaft/loop-unrolling-reducer.cc
+++ b/v8/src/compiler/turboshaft/loop-unrolling-reducer.cc
@@ -11,12 +11,12 @@
#include "src/compiler/turboshaft/loop-finder.h"
#ifdef DEBUG
-#define TRACE(x) \
+#define TRACE_IN_LOOP_UNROLLING_REDUCER(x) \
do { \
if (v8_flags.turboshaft_trace_unrolling) StdoutStream() << x << std::endl; \
} while (false)
#else
-#define TRACE(x)
+#define TRACE_IN_LOOP_UNROLLING_REDUCER(x)
#endif
namespace v8::internal::compiler::turboshaft {
@@ -27,7 +27,7 @@ using BinOp = StaticCanonicalForLoopMatcher::BinOp;
void LoopUnrollingAnalyzer::DetectUnrollableLoops() {
for (const auto& [start, info] : loop_finder_.LoopHeaders()) {
IterationCount iter_count = GetLoopIterationCount(info);
- TRACE("LoopUnrollingAnalyzer: loop at "
+ TRACE_IN_LOOP_UNROLLING_REDUCER("LoopUnrollingAnalyzer: loop at "
<< start->index() << " ==> iter_count=" << iter_count);
loop_iteration_count_.insert({start, iter_count});
diff --git a/v8/src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h b/v8/src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h
index e173e260ef..09c1fe0852 100644
--- a/v8/src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h
+++ b/v8/src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h
@@ -1237,6 +1237,8 @@ V<Any> WasmInJSInliningReducer<Next>::TryInlineWasmCall(
return emitting_decoder.interface().Result();
}
+#undef TRACE
+
} // namespace v8::internal::compiler::turboshaft
#include "src/compiler/turboshaft/undef-assembler-macros.inc"
diff --git a/v8/src/compiler/turboshaft/wasm-load-elimination-reducer.h b/v8/src/compiler/turboshaft/wasm-load-elimination-reducer.h
index 2b9d29c38f..ef46b1b07a 100644
--- a/v8/src/compiler/turboshaft/wasm-load-elimination-reducer.h
+++ b/v8/src/compiler/turboshaft/wasm-load-elimination-reducer.h
@@ -533,6 +533,7 @@ class WasmLoadEliminationReducer : public Next {
EMIT_OP(StringAsWtf16)
EMIT_OP(StringPrepareForGetCodeUnit)
EMIT_OP(AnyConvertExtern)
+#undef EMIT_OP
OpIndex REDUCE_INPUT_GRAPH(StructSet)(OpIndex ig_index,
const StructSetOp& op) {
diff --git a/v8/src/compiler/wasm-compiler.cc b/v8/src/compiler/wasm-compiler.cc
index 87a194b5c2..281f6eaf94 100644
--- a/v8/src/compiler/wasm-compiler.cc
+++ b/v8/src/compiler/wasm-compiler.cc
@@ -70,7 +70,7 @@ namespace {
// Use MachineType::Pointer() over Tagged() to load root pointers because they
// do not get compressed.
-#define LOAD_ROOT(RootName, factory_name) \
+#define LOAD_ROOT_IN_WASM_COMPILER(RootName, factory_name) \
(isolate_ ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
isolate_->factory()->factory_name())) \
: gasm_->LoadImmutable( \
@@ -277,7 +277,7 @@ Node* WasmGraphBuilder::Int32Constant(int32_t value) {
}
Node* WasmGraphBuilder::UndefinedValue() {
- return LOAD_ROOT(UndefinedValue, undefined_value);
+ return LOAD_ROOT_IN_WASM_COMPILER(UndefinedValue, undefined_value);
}
// TODO(ahaas): Merge TrapId with TrapReason.
@@ -1236,6 +1236,6 @@ AssemblerOptions WasmStubAssemblerOptions() {
};
}
-#undef LOAD_ROOT
+#undef LOAD_ROOT_IN_WASM_COMPILER
} // namespace v8::internal::compiler
diff --git a/v8/src/diagnostics/perf-jit.cc b/v8/src/diagnostics/perf-jit.cc
index 6af13fe8cf..2040a7aea8 100644
--- a/v8/src/diagnostics/perf-jit.cc
+++ b/v8/src/diagnostics/perf-jit.cc
@@ -37,6 +37,8 @@
#include <sys/mman.h>
#include <unistd.h>
+#undef MAP_TYPE
+
#include <memory>
#include "src/base/platform/wrappers.h"
diff --git a/v8/src/handles/global-handles.cc b/v8/src/handles/global-handles.cc
index 4a504500e4..fdb26addbb 100644
--- a/v8/src/handles/global-handles.cc
+++ b/v8/src/handles/global-handles.cc
@@ -756,7 +756,7 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
if (scope.CheckReenter()) {
TRACE_EVENT0("v8", "V8.GCPhantomHandleProcessingCallback");
isolate()->heap()->CallGCPrologueCallbacks(
- GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags,
+ v8::GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags,
GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
{
TRACE_GC(isolate_->heap()->tracer(),
@@ -768,7 +768,7 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
}
}
isolate()->heap()->CallGCEpilogueCallbacks(
- GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags,
+ v8::GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags,
GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
}
}
diff --git a/v8/src/heap/code-range.cc b/v8/src/heap/code-range.cc
index 8109348782..babc9e52e4 100644
--- a/v8/src/heap/code-range.cc
+++ b/v8/src/heap/code-range.cc
@@ -64,7 +64,7 @@ size_t CodeRange::GetWritableReservedAreaSize() {
return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}
-#define TRACE(...) \
+#define TRACE_IN_CODE_RANGE(...) \
if (v8_flags.trace_code_range_allocation) PrintF(__VA_ARGS__)
bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
@@ -114,7 +114,7 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
kMaxPCRelativeCodeRangeInMB > 1024 ? kMaxPCRelativeCodeRangeInMB : 4096;
auto preferred_region = GetPreferredRegion(kRadiusInMB, kPageSize);
- TRACE("=== Preferred region: [%p, %p)\n",
+ TRACE_IN_CODE_RANGE("=== Preferred region: [%p, %p)\n",
reinterpret_cast<void*>(preferred_region.begin()),
reinterpret_cast<void*>(preferred_region.end()));
@@ -139,10 +139,10 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
Address step =
RoundDown(preferred_region.size() / kAllocationTries, kPageSize);
for (int i = 0; i < kAllocationTries; i++) {
- TRACE("=== Attempt #%d, hint=%p\n", i,
+ TRACE_IN_CODE_RANGE("=== Attempt #%d, hint=%p\n", i,
reinterpret_cast<void*>(params.requested_start_hint));
if (candidate_cage.InitReservation(params)) {
- TRACE("=== Attempt #%d (%p): [%p, %p)\n", i,
+ TRACE_IN_CODE_RANGE("=== Attempt #%d (%p): [%p, %p)\n", i,
reinterpret_cast<void*>(params.requested_start_hint),
reinterpret_cast<void*>(candidate_cage.region().begin()),
reinterpret_cast<void*>(candidate_cage.region().end()));
@@ -167,7 +167,7 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
params.requested_start_hint = kNullAddress;
if (!VirtualMemoryCage::InitReservation(params)) return false;
}
- TRACE("=== Fallback attempt, hint=%p: [%p, %p)\n",
+ TRACE_IN_CODE_RANGE("=== Fallback attempt, hint=%p: [%p, %p)\n",
reinterpret_cast<void*>(params.requested_start_hint),
reinterpret_cast<void*>(region().begin()),
reinterpret_cast<void*>(region().end()));
@@ -197,14 +197,14 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
// the BoundedPageAllocator. Use it if it's big enough.
const Address non_allocatable_size = page_allocator_->begin() - base();
- TRACE("=== non-allocatable region: [%p, %p)\n",
+ TRACE_IN_CODE_RANGE("=== non-allocatable region: [%p, %p)\n",
reinterpret_cast<void*>(base()),
reinterpret_cast<void*>(base() + non_allocatable_size));
// Exclude the first page from allocatable pages if the required writable
// area doesn't fit into the non-allocatable area.
if (non_allocatable_size < required_writable_area_size) {
- TRACE("=== Exclude the first page from allocatable area\n");
+ TRACE_IN_CODE_RANGE("=== Exclude the first page from allocatable area\n");
excluded_allocatable_area_size = kPageSize;
CHECK(page_allocator_->AllocatePagesAt(page_allocator_->begin(),
excluded_allocatable_area_size,
diff --git a/v8/src/heap/cppgc/concurrent-marker.cc b/v8/src/heap/cppgc/concurrent-marker.cc
index 5b16920d22..e1e5199d27 100644
--- a/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/v8/src/heap/cppgc/concurrent-marker.cc
@@ -19,6 +19,8 @@ namespace {
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
0.5;
+#define kDefaultDeadlineCheckInterval kDefaultDeadlineCheckInterval_ConcurrentMarker
+
static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
template <StatsCollector::ConcurrentScopeId scope_id,
@@ -272,3 +274,5 @@ std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
} // namespace internal
} // namespace cppgc
+
+#undef kDefaultDeadlineCheckInterval
diff --git a/v8/src/heap/cppgc/marker.cc b/v8/src/heap/cppgc/marker.cc
index a4ac8b6e8a..d582998d71 100644
--- a/v8/src/heap/cppgc/marker.cc
+++ b/v8/src/heap/cppgc/marker.cc
@@ -57,6 +57,8 @@ bool ExitIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
return false;
}
+#define kDefaultDeadlineCheckInterval kDefaultDeadlineCheckInterval_Marker
+
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <StatsCollector::ScopeId scope_id,
@@ -799,3 +801,5 @@ Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
} // namespace internal
} // namespace cppgc
+
+#undef kDefaultDeadlineCheckInterval
diff --git a/v8/src/heap/minor-mark-sweep.cc b/v8/src/heap/minor-mark-sweep.cc
index 32b77fddb2..2241d07e9c 100644
--- a/v8/src/heap/minor-mark-sweep.cc
+++ b/v8/src/heap/minor-mark-sweep.cc
@@ -870,7 +870,7 @@ namespace {
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
-intptr_t NewSpacePageEvacuationThreshold() {
+intptr_t NewSpacePageEvacuationThreshold2() {
return v8_flags.minor_ms_page_promotion_threshold *
MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
}
@@ -882,7 +882,7 @@ bool ShouldMovePage(PageMetadata* p, intptr_t live_bytes,
Heap* heap = p->heap();
DCHECK(!p->Chunk()->NeverEvacuate());
const bool should_move_page =
- ((live_bytes + wasted_bytes) > NewSpacePageEvacuationThreshold() ||
+ ((live_bytes + wasted_bytes) > NewSpacePageEvacuationThreshold2() ||
(p->AllocatedLabSize() == 0)) &&
(heap->new_space()->IsPromotionCandidate(p)) &&
heap->CanExpandOldGeneration(live_bytes);
@@ -893,7 +893,7 @@ bool ShouldMovePage(PageMetadata* p, intptr_t live_bytes,
", live bytes = %zu, wasted bytes = %zu, promotion threshold = %zu"
", allocated labs size = %zu\n",
p, should_move_page, live_bytes, wasted_bytes,
- NewSpacePageEvacuationThreshold(), p->AllocatedLabSize());
+ NewSpacePageEvacuationThreshold2(), p->AllocatedLabSize());
}
if (!should_move_page &&
(p->AgeInNewSpace() == v8_flags.minor_ms_max_page_age)) {
diff --git a/v8/src/inspector/BUILD.gn b/v8/src/inspector/BUILD.gn
index 9a86323237..422ea6032d 100644
--- a/v8/src/inspector/BUILD.gn
+++ b/v8/src/inspector/BUILD.gn
@@ -171,6 +171,7 @@ v8_source_set("inspector") {
"value-mirror.cc",
"value-mirror.h",
]
+ jumbo_excluded_sources = [ "value-mirror.cc" ]
}
#Target to generate all .cc files.
diff --git a/v8/src/inspector/value-mirror.cc b/v8/src/inspector/value-mirror.cc
index b02dc2370d..c804d2cf62 100644
--- a/v8/src/inspector/value-mirror.cc
+++ b/v8/src/inspector/value-mirror.cc
@@ -200,7 +200,7 @@ String16 abbreviateString(const String16& value, AbbreviateMode mode) {
return String16::concat(value.substring(0, maxLength - 1), ellipsis);
}
-String16 descriptionForSymbol(v8::Local<v8::Context> context,
+String16 descriptionForSymbol2(v8::Local<v8::Context> context,
v8::Local<v8::Symbol> symbol) {
v8::Isolate* isolate = context->GetIsolate();
return String16::concat(
@@ -711,7 +711,7 @@ class SymbolMirror final : public ValueMirrorBase {
v8Value(context->GetIsolate()).As<v8::Symbol>();
*result = RemoteObject::create()
.setType(RemoteObject::TypeEnum::Symbol)
- .setDescription(descriptionForSymbol(context, value))
+ .setDescription(descriptionForSymbol2(context, value))
.build();
return Response::Success();
}
@@ -726,7 +726,7 @@ class SymbolMirror final : public ValueMirrorBase {
.setName(name)
.setType(RemoteObject::TypeEnum::Symbol)
.setValue(abbreviateString(
- descriptionForSymbol(context, value), kEnd))
+ descriptionForSymbol2(context, value), kEnd))
.build();
}
@@ -738,7 +738,7 @@ class SymbolMirror final : public ValueMirrorBase {
*preview =
ObjectPreview::create()
.setType(RemoteObject::TypeEnum::Symbol)
- .setDescription(descriptionForSymbol(context, value))
+ .setDescription(descriptionForSymbol2(context, value))
.setOverflow(false)
.setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
@@ -1531,7 +1531,7 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
name = toProtocolString(isolate, v8Name.As<v8::String>());
} else {
v8::Local<v8::Symbol> symbol = v8Name.As<v8::Symbol>();
- name = descriptionForSymbol(context, symbol);
+ name = descriptionForSymbol2(context, symbol);
symbolMirror = ValueMirror::create(context, symbol);
}
diff --git a/v8/src/libplatform/default-thread-isolated-allocator.cc b/v8/src/libplatform/default-thread-isolated-allocator.cc
index bda0e43cef..36a365b257 100644
--- a/v8/src/libplatform/default-thread-isolated-allocator.cc
+++ b/v8/src/libplatform/default-thread-isolated-allocator.cc
@@ -16,6 +16,8 @@
#include <unistd.h>
#endif
+#undef MAP_TYPE
+
#if V8_HAS_PKU_JIT_WRITE_PROTECT
extern int pkey_alloc(unsigned int flags, unsigned int access_rights) V8_WEAK;
diff --git a/v8/src/maglev/arm/maglev-ir-arm.cc b/v8/src/maglev/arm/maglev-ir-arm.cc
index 06fd376e05..9cc52478ee 100644
--- a/v8/src/maglev/arm/maglev-ir-arm.cc
+++ b/v8/src/maglev/arm/maglev-ir-arm.cc
@@ -912,6 +912,8 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
__ Ret();
}
+#undef __
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/v8/src/maglev/maglev-assembler-inl.h b/v8/src/maglev/maglev-assembler-inl.h
index 5c5a8a2b39..29503d39e3 100644
--- a/v8/src/maglev/maglev-assembler-inl.h
+++ b/v8/src/maglev/maglev-assembler-inl.h
@@ -574,11 +574,11 @@ void CheckArgs(Args&&... args) {}
#endif // DEBUG
-template <typename Descriptor, typename... Args>
+template <typename Descriptor2, typename... Args>
void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<Args...> args) {
std::apply(
[&](auto&&... stack_args) {
- if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) {
+ if (Descriptor2::kStackArgumentOrder == StackArgumentOrder::kDefault) {
masm->Push(std::forward<decltype(stack_args)>(stack_args)...);
} else {
masm->PushReverse(std::forward<decltype(stack_args)>(stack_args)...);
diff --git a/v8/src/maglev/maglev-assembler.cc b/v8/src/maglev/maglev-assembler.cc
index 0347c71847..b134bc5109 100644
--- a/v8/src/maglev/maglev-assembler.cc
+++ b/v8/src/maglev/maglev-assembler.cc
@@ -736,6 +736,8 @@ void MaglevAssembler::TryMigrateInstanceAndMarkMapAsMigrationTarget(
save_register_state.DefineSafepoint();
}
+#undef __
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/v8/src/maglev/maglev-code-generator.cc b/v8/src/maglev/maglev-code-generator.cc
index 3bc1becd1f..f55a2864be 100644
--- a/v8/src/maglev/maglev-code-generator.cc
+++ b/v8/src/maglev/maglev-code-generator.cc
@@ -427,7 +427,6 @@ class ParallelMoveResolver {
// but otherwise this code cannot be compiled by compilers not yet
// implementing CWG2518.
static_assert(DecompressIfNeeded && COMPRESS_POINTERS_BOOL);
-
if (targets.needs_decompression == kNeedsDecompression) {
__ DecompressTagged(source_reg, source_reg);
}
@@ -474,7 +473,6 @@ class ParallelMoveResolver {
// but otherwise this code cannot be compiled by compilers not yet
// implementing CWG2518.
static_assert(DecompressIfNeeded && COMPRESS_POINTERS_BOOL);
-
if (targets.needs_decompression == kNeedsDecompression) {
__ DecompressTagged(register_with_slot_value, register_with_slot_value);
targets.needs_decompression = kDoesNotNeedDecompression;
@@ -2091,6 +2089,8 @@ Handle<DeoptimizationData> MaglevCodeGenerator::GenerateDeoptimizationData(
return data;
}
+#undef __
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/v8/src/maglev/maglev-ir.cc b/v8/src/maglev/maglev-ir.cc
index a3b2290d30..3325545a33 100644
--- a/v8/src/maglev/maglev-ir.cc
+++ b/v8/src/maglev/maglev-ir.cc
@@ -8119,6 +8119,8 @@ std::optional<int32_t> NodeBase::TryGetInt32ConstantInput(int index) {
return {};
}
+#undef __
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/v8/src/objects/js-atomics-synchronization.h b/v8/src/objects/js-atomics-synchronization.h
index 73a03828db..8b458f2fba 100644
--- a/v8/src/objects/js-atomics-synchronization.h
+++ b/v8/src/objects/js-atomics-synchronization.h
@@ -29,11 +29,11 @@ template <typename T>
class AsyncWaiterQueueNode;
} // namespace detail
-using detail::WaiterQueueLockGuard;
-using detail::WaiterQueueNode;
-using LockAsyncWaiterQueueNode = detail::AsyncWaiterQueueNode<JSAtomicsMutex>;
+using internal::detail::WaiterQueueLockGuard;
+using internal::detail::WaiterQueueNode;
+using LockAsyncWaiterQueueNode = internal::detail::AsyncWaiterQueueNode<JSAtomicsMutex>;
using WaitAsyncWaiterQueueNode =
- detail::AsyncWaiterQueueNode<JSAtomicsCondition>;
+ internal::detail::AsyncWaiterQueueNode<JSAtomicsCondition>;
// JSSynchronizationPrimitive is the base class for JSAtomicsMutex and
// JSAtomicsCondition. It contains a 32-bit state field and a pointer to a
diff --git a/v8/src/objects/js-collator.cc b/v8/src/objects/js-collator.cc
index 405cad1552..3c5032b750 100644
--- a/v8/src/objects/js-collator.cc
+++ b/v8/src/objects/js-collator.cc
@@ -52,6 +52,8 @@ Maybe<CaseFirst> GetCaseFirst(Isolate* isolate,
CaseFirst::kUndefined);
}
+#define CreateDataPropertyForOptions CreateDataPropertyForOptions_JSCollator
+
// TODO(gsathya): Consider internalizing the value strings.
void CreateDataPropertyForOptions(Isolate* isolate,
DirectHandle<JSObject> options,
@@ -593,3 +595,5 @@ const std::set<std::string>& JSCollator::GetAvailableLocales() {
} // namespace internal
} // namespace v8
+
+#undef CreateDataPropertyForOptions
diff --git a/v8/src/objects/js-duration-format.cc b/v8/src/objects/js-duration-format.cc
index 5a9414b853..bf27c3b742 100644
--- a/v8/src/objects/js-duration-format.cc
+++ b/v8/src/objects/js-duration-format.cc
@@ -1071,7 +1071,7 @@ MaybeDirectHandle<String> FormattedToString(
return Intl::FormattedToString(isolate, formatted);
}
-MaybeDirectHandle<JSArray> FormattedListToJSArray(
+MaybeDirectHandle<JSArray> FormattedListToJSArray_JSDurationFormat(
Isolate* isolate, const icu::FormattedValue& formatted,
const std::vector<std::vector<Part>>* parts,
JSDurationFormat::Separator separator) {
@@ -1134,7 +1134,7 @@ MaybeDirectHandle<JSArray> JSDurationFormat::FormatToParts(
Isolate* isolate, DirectHandle<JSDurationFormat> df,
Handle<Object> duration) {
const char* method_name = "Intl.DurationFormat.prototype.formatToParts";
- return FormatCommon<JSArray, true, FormattedListToJSArray>(
+ return FormatCommon<JSArray, true, FormattedListToJSArray_JSDurationFormat>(
isolate, df, duration, method_name);
}
diff --git a/v8/src/objects/js-list-format.cc b/v8/src/objects/js-list-format.cc
index d5dacda547..baca118dc8 100644
--- a/v8/src/objects/js-list-format.cc
+++ b/v8/src/objects/js-list-format.cc
@@ -247,7 +247,7 @@ DirectHandle<String> IcuFieldIdToType(Isolate* isolate, int32_t field_id) {
// A helper function to convert the FormattedList to a
// MaybeHandle<JSArray> for the implementation of formatToParts.
-MaybeDirectHandle<JSArray> FormattedListToJSArray(
+MaybeDirectHandle<JSArray> FormattedListToJSArray_JSListFormat(
Isolate* isolate, const icu::FormattedValue& formatted) {
DirectHandle<JSArray> array = isolate->factory()->NewJSArray(0);
icu::ConstrainedFieldPosition cfpos;
@@ -285,7 +285,7 @@ MaybeDirectHandle<JSArray> JSListFormat::FormatListToParts(
Isolate* isolate, DirectHandle<JSListFormat> format,
DirectHandle<FixedArray> list) {
return FormatListCommon<JSArray>(isolate, format, list,
- FormattedListToJSArray);
+ FormattedListToJSArray_JSListFormat);
}
namespace {
diff --git a/v8/src/objects/js-plural-rules.cc b/v8/src/objects/js-plural-rules.cc
index 4606b75f42..ad3d618764 100644
--- a/v8/src/objects/js-plural-rules.cc
+++ b/v8/src/objects/js-plural-rules.cc
@@ -223,6 +223,8 @@ MaybeDirectHandle<String> JSPluralRules::ResolvePluralRange(
return Intl::ToString(isolate, result);
}
+#define CreateDataPropertyForOptions CreateDataPropertyForOptions_JSPluralRules
+
namespace {
void CreateDataPropertyForOptions(Isolate* isolate,
@@ -387,3 +389,5 @@ const std::set<std::string>& JSPluralRules::GetAvailableLocales() {
} // namespace internal
} // namespace v8
+
+#undef CreateDataPropertyForOptions
diff --git a/v8/src/regexp/experimental/experimental-compiler.cc b/v8/src/regexp/experimental/experimental-compiler.cc
index 7bee9f1179..391dac8cad 100644
--- a/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/v8/src/regexp/experimental/experimental-compiler.cc
@@ -224,7 +224,7 @@ bool ExperimentalRegExpCompiler::CanBeHandled(RegExpTree* tree,
return CanBeHandledVisitor::Check(tree, flags, capture_count);
}
-namespace {
+namespace experimental {
// A label in bytecode which starts with no known address. The address *must*
// be bound with `Bind` before the label goes out of scope.
@@ -1226,7 +1226,7 @@ class CompileVisitor : private RegExpVisitor {
ZoneList<RegExpInstruction> ExperimentalRegExpCompiler::Compile(
RegExpTree* tree, RegExpFlags flags, Zone* zone) {
- return CompileVisitor::Compile(tree, flags, zone);
+ return experimental::CompileVisitor::Compile(tree, flags, zone);
}
} // namespace internal
diff --git a/v8/src/regexp/regexp-compiler-tonode.cc b/v8/src/regexp/regexp-compiler-tonode.cc
index a71b6112e5..b7de236b42 100644
--- a/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/v8/src/regexp/regexp-compiler-tonode.cc
@@ -23,7 +23,7 @@ namespace internal {
using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
-constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+constexpr base::uc32 kMaxCodePoint2 = 0x10ffff;
constexpr int kMaxUtf16CodeUnit = 0xffff;
constexpr uint32_t kMaxUtf16CodeUnitU = 0xffff;
@@ -71,7 +71,7 @@ bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
}
}
- return range.to() == kMaxCodePoint;
+ return range.to() == kMaxCodePoint2;
}
bool CompareRanges(ZoneList<CharacterRange>* ranges, const int* special_class,
@@ -480,7 +480,7 @@ RegExpNode* RegExpClassRanges::ToNode(RegExpCompiler* compiler,
// internally created for an empty set.
DCHECK_IMPLIES(
IsUnicodeSets(compiler->flags()),
- ranges->length() == 1 && ranges->first().IsEverything(kMaxCodePoint));
+ ranges->length() == 1 && ranges->first().IsEverything(kMaxCodePoint2));
ZoneList<CharacterRange>* negated =
zone->New<ZoneList<CharacterRange>>(2, zone);
CharacterRange::Negate(ranges, negated, zone);
@@ -1377,7 +1377,7 @@ void AddClassNegated(const int* elmv, int elmc,
elmc--;
DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
DCHECK_NE(0x0000, elmv[0]);
- DCHECK_NE(kMaxCodePoint, elmv[elmc - 1]);
+ DCHECK_NE(kMaxCodePoint2, elmv[elmc - 1]);
base::uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
DCHECK(last <= elmv[i] - 1);
@@ -1385,7 +1385,7 @@ void AddClassNegated(const int* elmv, int elmc,
ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange::Range(last, kMaxCodePoint), zone);
+ ranges->Add(CharacterRange::Range(last, kMaxCodePoint2), zone);
}
} // namespace
@@ -1730,8 +1730,8 @@ void CharacterRange::Negate(const ZoneList<CharacterRange>* ranges,
from = range.to() + 1;
i++;
}
- if (from < kMaxCodePoint) {
- negated_ranges->Add(CharacterRange::Range(from, kMaxCodePoint), zone);
+ if (from < kMaxCodePoint2) {
+ negated_ranges->Add(CharacterRange::Range(from, kMaxCodePoint2), zone);
}
}
@@ -1782,7 +1782,7 @@ void SafeAdvanceRange(const ZoneList<CharacterRange>* range, int* index,
*from = range->at(*index).from();
*to = range->at(*index).to();
} else {
- *from = kMaxCodePoint + 1;
+ *from = kMaxCodePoint2 + 1;
}
}
diff --git a/v8/src/sandbox/testing.cc b/v8/src/sandbox/testing.cc
index 5fd8a5572d..a049f497a4 100644
--- a/v8/src/sandbox/testing.cc
+++ b/v8/src/sandbox/testing.cc
@@ -18,6 +18,7 @@
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
+#undef MAP_TYPE
#endif // V8_OS_LINUX
#ifdef V8_USE_ADDRESS_SANITIZER
diff --git a/v8/src/snapshot/read-only-deserializer.cc b/v8/src/snapshot/read-only-deserializer.cc
index f1056d7aa6..66ef9682a6 100644
--- a/v8/src/snapshot/read-only-deserializer.cc
+++ b/v8/src/snapshot/read-only-deserializer.cc
@@ -188,7 +188,7 @@ void ReadOnlyDeserializer::DeserializeIntoIsolate() {
}
}
-void NoExternalReferencesCallback() {
+void NoExternalReferencesCallback2() {
// The following check will trigger if a function or object template with
// references to native functions have been deserialized from snapshot, but
// no actual external references were provided when the isolate was created.
@@ -239,7 +239,7 @@ class ObjectPostProcessor final {
const intptr_t* refs = isolate_->api_external_references();
Address address =
refs == nullptr
- ? reinterpret_cast<Address>(NoExternalReferencesCallback)
+ ? reinterpret_cast<Address>(NoExternalReferencesCallback2)
: static_cast<Address>(refs[index]);
DCHECK_NE(address, kNullAddress);
return address;
diff --git a/v8/src/torque/csa-generator.cc b/v8/src/torque/csa-generator.cc
index 4495f0b100..0b4d7f92e4 100644
--- a/v8/src/torque/csa-generator.cc
+++ b/v8/src/torque/csa-generator.cc
@@ -946,8 +946,8 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
}
namespace {
-std::string GetBitFieldSpecialization(const Type* container,
- const BitField& field) {
+std::string GetBitFieldSpecialization2(const Type* container,
+ const BitField& field) {
auto smi_tagged_type =
Type::MatchUnaryGeneric(container, TypeOracle::GetSmiTaggedGeneric());
std::string container_type = smi_tagged_type
@@ -1000,7 +1000,7 @@ void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
out() << " " << result_name << " = ca_.UncheckedCast<"
<< field_type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler(state_)." << decoder << "<"
- << GetBitFieldSpecialization(struct_type, instruction.bit_field)
+ << GetBitFieldSpecialization2(struct_type, instruction.bit_field)
<< ">(ca_.UncheckedCast<" << struct_word_type << ">("
<< bit_field_struct << ")));\n";
}
@@ -1042,7 +1042,7 @@ void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
std::string result_expression =
"CodeStubAssembler(state_)." + encoder + "<" +
- GetBitFieldSpecialization(struct_type, instruction.bit_field) +
+ GetBitFieldSpecialization2(struct_type, instruction.bit_field) +
">(ca_.UncheckedCast<" + struct_word_type + ">(" + bit_field_struct +
"), ca_.UncheckedCast<" + field_word_type + ">(" + value + ")" +
(instruction.starts_as_zero ? ", true" : "") + ")";
diff --git a/v8/src/utils/sha-256.cc b/v8/src/utils/sha-256.cc
index 4b1d2bd42b..f55c0b4aec 100644
--- a/v8/src/utils/sha-256.cc
+++ b/v8/src/utils/sha-256.cc
@@ -174,3 +174,6 @@ const uint8_t* SHA256_hash(const void* data, size_t len, uint8_t* digest) {
} // namespace internal
} // namespace v8
+
+#undef ror
+#undef shr
diff --git a/v8/src/wasm/wasm-external-refs.cc b/v8/src/wasm/wasm-external-refs.cc
index 95f9952ef3..6ecaf895a1 100644
--- a/v8/src/wasm/wasm-external-refs.cc
+++ b/v8/src/wasm/wasm-external-refs.cc
@@ -750,6 +750,8 @@ V ReadAndIncrementOffset(Address data, size_t* offset) {
return result;
}
+#define kSuccess kSuccess_WASMExternalRefs
+
constexpr int32_t kSuccess = 1;
constexpr int32_t kOutOfBounds = 0;
} // namespace
@@ -1147,3 +1149,4 @@ Address load_old_fp(Isolate* isolate) {
#undef V8_WITH_SANITIZER
#undef RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS
+#undef kSuccess
diff --git a/v8/src/wasm/wrappers.cc b/v8/src/wasm/wrappers.cc
index 1df44f104b..5846d10206 100644
--- a/v8/src/wasm/wrappers.cc
+++ b/v8/src/wasm/wrappers.cc
@@ -1399,4 +1399,6 @@ void BuildWasmWrapper(compiler::turboshaft::PipelineData* data,
}
}
+#include "src/compiler/turboshaft/undef-assembler-macros.inc"
+
} // namespace v8::internal::wasm