diff --git a/common.gypi b/common.gypi
index 4745bb5ac77639..a3f155fde2861a 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.17',
+    'v8_embedder_string': '-node.21',
 
     ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index bda33c185fe683..219837ff45e9e3 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -3237,6 +3237,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/wasm/baseline/liftoff-compiler.cc",
     "src/wasm/baseline/liftoff-compiler.h",
     "src/wasm/baseline/liftoff-register.h",
+    "src/wasm/code-space-access.h",
     "src/wasm/compilation-environment.h",
     "src/wasm/decoder.h",
     "src/wasm/function-body-decoder-impl.h",
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index aae381b080617f..1bf75a1d42fc78 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -384,7 +384,13 @@ class PageAllocator {
     kReadWrite,
     // TODO(hpayer): Remove this flag. Memory should never be rwx.
     kReadWriteExecute,
-    kReadExecute
+    kReadExecute,
+    // Set this when reserving memory that will later require kReadWriteExecute
+    // permissions. The resulting behavior is platform-specific; currently
+    // this is used to set the MAP_JIT flag on Apple Silicon.
+    // TODO(jkummerow): Remove this when Wasm has a platform-independent
+    // w^x implementation.
+    kNoAccessWillJitLater
   };
 
   /**
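The kNoAccessWillJitLater value added above is a reservation-time hint: the pages still start out inaccessible, but the platform layer may set MAP_JIT so the region can legally become executable later. A minimal sketch of the intended embedder-side call, assuming a platform-provided v8::PageAllocator (the helper name is illustrative, not part of this patch):

#include <cstddef>

#include "include/v8-platform.h"

// Reserve a region that will later hold JIT-generated code. On Apple Silicon
// the hint makes the platform layer pass MAP_JIT; on every other platform it
// behaves exactly like kNoAccess.
void* ReserveFutureJitRegion(v8::PageAllocator* allocator, size_t size) {
  return allocator->AllocatePages(nullptr, size,
                                  allocator->AllocatePageSize(),
                                  v8::PageAllocator::kNoAccessWillJitLater);
}
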
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 98b2c690960336..9f48ee79fe5424 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -6,6 +6,10 @@
 
 #include "src/base/platform/platform.h"
 
+#if V8_OS_MACOSX
+#include <sys/mman.h>  // For MAP_JIT.
+#endif
+
 namespace v8 {
 namespace base {
 
@@ -21,6 +25,8 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadWriteExecute,
                    base::OS::MemoryPermission::kReadWriteExecute);
 STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
                    base::OS::MemoryPermission::kReadExecute);
+STATIC_ASSERT_ENUM(PageAllocator::kNoAccessWillJitLater,
+                   base::OS::MemoryPermission::kNoAccessWillJitLater);
 
 #undef STATIC_ASSERT_ENUM
 
@@ -38,6 +44,14 @@ void* PageAllocator::GetRandomMmapAddr() {
 
 void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
                                    PageAllocator::Permission access) {
+#if !(V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT))
+  // kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular
+  // kNoAccess on other platforms, so code doesn't have to handle both enum
+  // values.
+  if (access == PageAllocator::kNoAccessWillJitLater) {
+    access = PageAllocator::kNoAccess;
+  }
+#endif
   return base::OS::Allocate(hint, size, alignment,
                             static_cast<base::OS::MemoryPermission>(access));
 }
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 92a5fbe490f4c3..b9da2f1cd592db 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -33,6 +33,7 @@ namespace {
 DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return PAGE_NOACCESS;
     case OS::MemoryPermission::kRead:
       return PAGE_READONLY;
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index fa175c39177aea..35a508a140ebd7 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -18,6 +18,7 @@ namespace {
 uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return 0;  // no permissions
     case OS::MemoryPermission::kRead:
       return ZX_VM_PERM_READ;
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 14294019d90dd0..4b49968baa053f 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -125,6 +125,7 @@ const int kMmapFdOffset = 0;
 int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return PROT_NONE;
     case OS::MemoryPermission::kRead:
       return PROT_READ;
@@ -152,6 +153,11 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
     flags |= MAP_LAZY;
 #endif  // V8_OS_QNX
   }
+#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT)
+  if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
+    flags |= MAP_JIT;
+  }
+#endif
   return flags;
 }
 
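On Mac-on-arm64 the MAP_JIT flag added above is only half of the mechanism: MAP_JIT regions are mapped RWX, and each thread then toggles between a writable view and an executable view with pthread_jit_write_protect_np(). A standalone sketch of that pattern, outside of V8 and with an illustrative function name:

#include <cstddef>
#include <pthread.h>
#include <sys/mman.h>

// Map an RWX region for JIT code. MAP_JIT is required for this on Apple
// Silicon; hardened-runtime builds additionally need the allow-jit
// entitlement.
void* MapJitRegion(size_t size) {
  void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

// Typical use on the emitting thread:
//   pthread_jit_write_protect_np(0);  // writable view: copy code in
//   ... emit machine code into the region ...
//   pthread_jit_write_protect_np(1);  // executable view: ready to run
//   (followed by an instruction-cache flush before calling into it)
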
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 5db3e343103dd0..6be63dee137a81 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -753,6 +753,7 @@ namespace {
 DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return PAGE_NOACCESS;
     case OS::MemoryPermission::kRead:
       return PAGE_READONLY;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index d5f59d1d7a8d8a..c4895a5b274374 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -167,7 +167,10 @@ class V8_BASE_EXPORT OS {
     kReadWrite,
     // TODO(hpayer): Remove this flag. Memory should never be rwx.
     kReadWriteExecute,
-    kReadExecute
+    kReadExecute,
+    // TODO(jkummerow): Remove this when Wasm has a platform-independent
+    // w^x implementation.
+    kNoAccessWillJitLater
   };
 
   static bool HasLazyCommits();
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 6169acbfd6687a..022ac82ea6fa28 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -213,15 +213,17 @@ bool OnCriticalMemoryPressure(size_t length) {
 VirtualMemory::VirtualMemory() = default;
 
 VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
-                             void* hint, size_t alignment)
+                             void* hint, size_t alignment, JitPermission jit)
     : page_allocator_(page_allocator) {
   DCHECK_NOT_NULL(page_allocator);
   DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
   size_t page_size = page_allocator_->AllocatePageSize();
   alignment = RoundUp(alignment, page_size);
-  Address address = reinterpret_cast<Address>(
-      AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
-                    PageAllocator::kNoAccess));
+  PageAllocator::Permission permissions =
+      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
+                            : PageAllocator::kNoAccess;
+  Address address = reinterpret_cast<Address>(AllocatePages(
+      page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
   if (address != kNullAddress) {
     DCHECK(IsAligned(address, alignment));
     region_ = base::AddressRegion(address, size);
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 7106b1c749a893..a82012310b8efe 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -156,6 +156,8 @@ V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
 // Represents and controls an area of reserved memory.
 class VirtualMemory final {
  public:
+  enum JitPermission { kNoJit, kMapAsJittable };
+
   // Empty VirtualMemory object, controlling no reserved memory.
   V8_EXPORT_PRIVATE VirtualMemory();
 
@@ -164,8 +166,8 @@ class VirtualMemory final {
   // size. The |size| must be aligned with |page_allocator|'s commit page size.
   // This may not be at the position returned by address().
   V8_EXPORT_PRIVATE VirtualMemory(v8::PageAllocator* page_allocator,
-                                  size_t size, void* hint,
-                                  size_t alignment = 1);
+                                  size_t size, void* hint, size_t alignment = 1,
+                                  JitPermission jit = kNoJit);
 
   // Construct a virtual memory by assigning it some already mapped address
   // and size.
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
new file mode 100644
index 00000000000000..5eeb980e17eddc
--- /dev/null
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -0,0 +1,69 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CODE_SPACE_ACCESS_H_
+#define V8_WASM_CODE_SPACE_ACCESS_H_
+
+#include "src/base/build_config.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+inline void SwitchMemoryPermissionsToWritable() {
+  pthread_jit_write_protect_np(0);
+}
+inline void SwitchMemoryPermissionsToExecutable() {
+  pthread_jit_write_protect_np(1);
+}
+#pragma clang diagnostic pop
+
+namespace wasm {
+
+class CodeSpaceWriteScope {
+ public:
+  // TODO(jkummerow): Background threads could permanently stay in
+  // writable mode; only the main thread has to switch back and forth.
+  CodeSpaceWriteScope() {
+    if (code_space_write_nesting_level_ == 0) {
+      SwitchMemoryPermissionsToWritable();
+    }
+    code_space_write_nesting_level_++;
+  }
+  ~CodeSpaceWriteScope() {
+    code_space_write_nesting_level_--;
+    if (code_space_write_nesting_level_ == 0) {
+      SwitchMemoryPermissionsToExecutable();
+    }
+  }
+
+ private:
+  static thread_local int code_space_write_nesting_level_;
+};
+
+#define CODE_SPACE_WRITE_SCOPE CodeSpaceWriteScope _write_access_;
+
+}  // namespace wasm
+
+#else  // Not Mac-on-arm64.
+
+// Nothing to do, we map code memory with rwx permissions.
+inline void SwitchMemoryPermissionsToWritable() {}
+inline void SwitchMemoryPermissionsToExecutable() {}
+
+#define CODE_SPACE_WRITE_SCOPE
+
+#endif  // V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_CODE_SPACE_ACCESS_H_
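The new header is used by placing CODE_SPACE_WRITE_SCOPE at the start of any block that writes into the wasm code space, as the wasm-code-manager.cc and wasm-serialization.cc hunks below do. A minimal sketch of the pattern (the helper function is illustrative, not part of this patch):

#include <cstdint>
#include <cstring>

#include "src/wasm/code-space-access.h"

namespace v8 {
namespace internal {
namespace wasm {

// Illustrative helper: copy generated machine code into the code space.
void WriteCodeToSlot(uint8_t* dst, const uint8_t* src, size_t size) {
  // On Mac-on-arm64 this switches the current thread to the writable view of
  // the RWX (MAP_JIT) code space; nesting is tracked via a thread-local
  // counter. On every other platform the macro expands to nothing.
  CODE_SPACE_WRITE_SCOPE
  memcpy(dst, src, size);
  // When the scope ends (Mac-on-arm64 only), the thread flips back to the
  // executable view so the copied code can run.
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
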
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 2f5a3f479b2475..fc657d634dba7c 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -6,6 +6,7 @@
 
 #include <iomanip>
 
+#include "src/base/build_config.h"
 #include "src/base/iterator.h"
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
@@ -21,6 +22,7 @@
 #include "src/snapshot/embedded/embedded-data.h"
 #include "src/utils/ostreams.h"
 #include "src/utils/vector.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/compilation-environment.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/jump-table-assembler.h"
@@ -47,6 +49,10 @@ namespace wasm {
 
 using trap_handler::ProtectedInstructionData;
 
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+#endif
+
 base::AddressRegion DisjointAllocationPool::Merge(
     base::AddressRegion new_region) {
   // Find the possible insertion position by identifying the first region whose
@@ -731,6 +737,7 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
   // Zap code area and collect freed code regions.
   DisjointAllocationPool freed_regions;
   size_t code_size = 0;
+  CODE_SPACE_WRITE_SCOPE
   for (WasmCode* code : codes) {
     ZapCode(code->instruction_start(), code->instructions().size());
     FlushInstructionCache(code->instruction_start(),
@@ -847,6 +854,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
 }
 
 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
+  CODE_SPACE_WRITE_SCOPE
   // For off-heap builtins, we create a copy of the off-heap instruction stream
   // instead of the on-heap code object containing the trampoline. Ensure that
   // we do not apply the on-heap reloc info to the off-heap instructions.
@@ -942,6 +950,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
+    CODE_SPACE_WRITE_SCOPE
     base::AddressRegion single_code_space_region;
     {
       base::MutexGuard guard(&allocation_mutex_);
@@ -1003,6 +1012,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
   const int code_comments_offset = desc.code_comments_offset;
   const int instr_size = desc.instr_size;
 
+  CODE_SPACE_WRITE_SCOPE
   memcpy(dst_code_bytes.begin(), desc.buffer,
          static_cast<size_t>(desc.instr_size));
 
@@ -1138,6 +1148,7 @@ WasmCode* NativeModule::AddDeserializedCode(
     Vector<const byte> protected_instructions_data,
     Vector<const byte> reloc_info, Vector<const byte> source_position_table,
     WasmCode::Kind kind, ExecutionTier tier) {
+  // CodeSpaceWriteScope is provided by the caller.
   Vector<uint8_t> dst_code_bytes =
       code_allocator_.AllocateForCode(this, instructions.size());
   memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
@@ -1196,6 +1207,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
   Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
       this, jump_table_size, region, allocator_lock);
   DCHECK(!code_space.empty());
+  CODE_SPACE_WRITE_SCOPE
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{
       new WasmCode{this,                  // native_module
@@ -1221,6 +1233,7 @@ void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
   // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
   DCHECK(!allocation_mutex_.TryLock());
 
+  CODE_SPACE_WRITE_SCOPE
   for (auto& code_space_data : code_space_data_) {
     DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
     if (!code_space_data.jump_table) continue;
@@ -1283,6 +1296,7 @@ void NativeModule::AddCodeSpace(
 #endif  // V8_OS_WIN64
 
   WasmCodeRefScope code_ref_scope;
+  CODE_SPACE_WRITE_SCOPE
   WasmCode* jump_table = nullptr;
   WasmCode* far_jump_table = nullptr;
   const uint32_t num_wasm_functions = module_->num_declared_functions;
@@ -1596,7 +1610,11 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   if (!BackingStore::ReserveAddressSpace(size)) return {};
   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
 
-  VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
+  // When we start exposing Wasm in jitless mode, the jitless flag
+  // will have to determine whether we set kMapAsJittable or not.
+  DCHECK(!FLAG_jitless);
+  VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
+                    VirtualMemory::kMapAsJittable);
   if (!mem.IsReserved()) {
     BackingStore::ReleaseReservation(size);
     return {};
@@ -1843,6 +1861,7 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
   generated_code.reserve(results.size());
 
   // Now copy the generated code into the code space and relocate it.
+  CODE_SPACE_WRITE_SCOPE
   for (auto& result : results) {
     DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
     size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index e5bab7e2cdc57c..f4f5f992682a06 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -13,6 +13,7 @@
 #include "src/utils/ostreams.h"
 #include "src/utils/utils.h"
 #include "src/utils/version.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/module-compiler.h"
 #include "src/wasm/module-decoder.h"
@@ -534,6 +535,7 @@ void NativeModuleDeserializer::ReadCode(int fn_index, Reader* reader) {
   auto protected_instructions =
       reader->ReadVector<byte>(protected_instructions_size);
 
+  CODE_SPACE_WRITE_SCOPE
   WasmCode* code = native_module_->AddDeserializedCode(
       fn_index, code_buffer, stack_slot_count, tagged_parameter_slots,
       safepoint_table_offset, handler_table_offset, constant_pool_offset,
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 21db27c5d3d4e4..1f93ec28c299ff 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -176,6 +176,13 @@
   'test-debug/DebugBreakStackTrace': [PASS, SLOW],
 }],  # 'arch == arm64 and simulator_run'
 
+['arch == arm64 and system == macos and not simulator_run', {
+  # printf, being a variadic function, has a different, stack-based ABI on
+  # Apple silicon. See:
+  # https://developer.apple.com/library/archive/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html
+  'test-assembler-arm64/printf_no_preserve': [SKIP],
+}],  # arch == arm64 and system == macos and not simulator_run
+
 ##############################################################################
 ['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
   # Slow tests: https://crbug.com/v8/7783
@@ -489,6 +496,7 @@
   'test-jump-table-assembler/*': [SKIP],
   'test-gc/*': [SKIP],
   'test-grow-memory/*': [SKIP],
+  'test-liftoff-inspection/*': [SKIP],
   'test-run-wasm-64/*': [SKIP],
   'test-run-wasm-asmjs/*': [SKIP],
   'test-run-wasm-atomics64/*': [SKIP],
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 19da59e1727f80..52aaf3162b1991 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -11720,9 +11720,9 @@ TEST(system_msr) {
   const uint64_t fpcr_core = 0x07C00000;
 
   // All FPCR fields (including fields which may be read-as-zero):
-  //  Stride, Len
+  //  Stride, FZ16, Len
   //  IDE, IXE, UFE, OFE, DZE, IOE
-  const uint64_t fpcr_all = fpcr_core | 0x00379F00;
+  const uint64_t fpcr_all = fpcr_core | 0x003F9F00;
 
   SETUP();
 
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 263951b573fd04..f79b848dc1b581 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -41,8 +41,9 @@ template <class T>
 using TVariable = TypedCodeAssemblerVariable<T>;
 using PromiseResolvingFunctions = TorqueStructPromiseResolvingFunctions;
 
-int sum10(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
-          int a8, int a9) {
+intptr_t sum10(intptr_t a0, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4,
+               intptr_t a5, intptr_t a6, intptr_t a7, intptr_t a8,
+               intptr_t a9) {
   return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9;
 }
 
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index e8c89b7232b3b0..82baa9fe96212d 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -6,6 +6,7 @@
 #include "src/codegen/macro-assembler-inl.h"
 #include "src/execution/simulator.h"
 #include "src/handles/handles-inl.h"
+#include "src/wasm/code-space-access.h"
 #include "test/cctest/cctest.h"
 #include "test/common/assembler-tester.h"
 
@@ -179,11 +180,15 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
 
     CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                          buffer->size(), v8::PageAllocator::kReadWriteExecute));
+    SwitchMemoryPermissionsToWritable();
     FloodWithInc(isolate, buffer.get());
     FlushInstructionCache(buffer->start(), buffer->size());
+    SwitchMemoryPermissionsToExecutable();
     CHECK_EQ(23 + kNumInstr, f.Call(23));  // Call into generated code.
+    SwitchMemoryPermissionsToWritable();
     FloodWithNop(isolate, buffer.get());
     FlushInstructionCache(buffer->start(), buffer->size());
+    SwitchMemoryPermissionsToExecutable();
     CHECK_EQ(23, f.Call(23));  // Call into generated code.
   }
 }
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 99ec7d25ab457c..a0dd4cc33be301 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -8,6 +8,7 @@
 #include "src/codegen/macro-assembler-inl.h"
 #include "src/execution/simulator.h"
 #include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/jump-table-assembler.h"
 #include "test/cctest/cctest.h"
 #include "test/common/assembler-tester.h"
@@ -33,7 +34,12 @@ constexpr uint32_t kJumpTableSize =
     JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
 
 // Must be a safe commit page size.
+#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+// See kAppleArmPageSize in platform-posix.cc.
+constexpr size_t kThunkBufferSize = 1 << 14;
+#else
 constexpr size_t kThunkBufferSize = 4 * KB;
+#endif
 
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
 constexpr uint32_t kAvailableBufferSlots =
@@ -154,6 +160,7 @@ class JumpTableRunner : public v8::base::Thread {
 
   void Run() override {
     TRACE("Runner #%d is starting ...\n", runner_id_);
+    SwitchMemoryPermissionsToExecutable();
     GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
     TRACE("Runner #%d is stopping ...\n", runner_id_);
     USE(runner_id_);
@@ -176,6 +183,7 @@ class JumpTablePatcher : public v8::base::Thread {
 
   void Run() override {
     TRACE("Patcher %p is starting ...\n", this);
+    SwitchMemoryPermissionsToWritable();
     Address slot_address =
         slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
     // First, emit code to the two thunks.
@@ -235,6 +243,7 @@ TEST(JumpTablePatchingStress) {
 
   std::bitset<kAvailableBufferSlots> used_thunk_slots;
   buffer->MakeWritableAndExecutable();
+  SwitchMemoryPermissionsToWritable();
 
   // Iterate through jump-table slots to hammer at different alignments within
   // the jump-table, thereby increasing stress for variable-length ISAs.
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index bd476cd1ec1682..a919945d3f4ee7 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -170,6 +170,7 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
     os << "  page: [" << start << ", " << end << "), access: ";
     switch (access) {
       case PageAllocator::kNoAccess:
+      case PageAllocator::kNoAccessWillJitLater:
         os << "--";
         break;
       case PageAllocator::kRead:
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 20406242778652..96dd893db20568 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -17,6 +17,27 @@
   'RandomNumberGenerator.NextSampleSlowInvalidParam2': [SKIP],
 }],  # system == macos and asan
 
+['system == macos and arch == arm64 and not simulator_run', {
+  # Throwing C++ exceptions doesn't work; probably because the unittests
+  # binary is built with -fno-exceptions?
+  'LanguageServerJson.LexerError': [SKIP],
+  'LanguageServerJson.ParserError': [SKIP],
+  'Torque.DoubleUnderScorePrefixIllegalForIdentifiers': [SKIP],
+  'Torque.Enums': [SKIP],
+  'Torque.ImportNonExistentFile': [SKIP],
+
+  # Test uses fancy signal handling. Needs investigation.
+  'MemoryAllocationPermissionsTest.DoTest': [SKIP],
+
+  # cppgc::internal::kGuardPageSize is smaller than kAppleArmPageSize.
+  'PageMemoryRegionTest.PlatformUsesGuardPages': [FAIL],
+
+  # Time tick resolution appears to be ~42 microseconds. Tests expect 1 us.
+  'TimeTicks.NowResolution': [FAIL],
+  'RuntimeCallStatsTest.BasicJavaScript': [SKIP],
+  'RuntimeCallStatsTest.FunctionLengthGetter': [SKIP],
+}],  # system == macos and arch == arm64 and not simulator_run
+
 ##############################################################################
 ['lite_mode or variant == jitless', {
   # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.