diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6a1c9fca5260b228b01010d77d1496f260a7bf96..92cadfdc88ca00cebf4dfb21aee2471b014bcd51 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1607,7 +1607,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
 		break;
 	/* speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 	/* ST: *(size *)(dst + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ec2174838f2af0ff9a51ee2f224527362b60003a..9d573f949081bc9b194cfebe6d4436b3ceae3ebb 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1178,8 +1178,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			return ret;
 		break;
 
-	/* speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1:
+		/* TODO */
+		break;
+
+	/* speculative store bypass barrier */
+	case BPF_ST | BPF_NOSPEC_V4:
 		/*
 		 * Nothing required here.
 		 *
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index db9342b2d0e6605e8b13179acdc9e61bdb761dee..2a1b553738eea250595b47b7e962b7666eff7317 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1023,7 +1023,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 		break;
 
 	/* Speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 
 	default:
diff --git a/arch/mips/net/bpf_jit_comp32.c b/arch/mips/net/bpf_jit_comp32.c
index ace5db3fbd171fc63421cc716c2f00d4967a8501..33c3fbb277f4696066d506028fcc7d4352b6e04c 100644
--- a/arch/mips/net/bpf_jit_comp32.c
+++ b/arch/mips/net/bpf_jit_comp32.c
@@ -1684,7 +1684,8 @@ int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
 		emit_stx(ctx, lo(dst), src, off, BPF_SIZE(code));
 		break;
 	/* Speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 	/* Atomics */
 	case BPF_STX | BPF_ATOMIC | BPF_W:
diff --git a/arch/mips/net/bpf_jit_comp64.c b/arch/mips/net/bpf_jit_comp64.c
index fa7e9aa37f498d485c887d6eb0fe01bc13cd86c9..c654e3d3b46b8460d7e92cd4d24f35ffe8ffa8b4 100644
--- a/arch/mips/net/bpf_jit_comp64.c
+++ b/arch/mips/net/bpf_jit_comp64.c
@@ -843,7 +843,8 @@ int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
 		emit_stx(ctx, dst, src, off, BPF_SIZE(code));
 		break;
 	/* Speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 	/* Atomics */
 	case BPF_STX | BPF_ATOMIC | BPF_W:
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 7f91ea064c087449d8ed5616dcbbeeec4bbbbec6..d285c17eaa6f4609061dcddfe55ab3ca3c5c800a 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -801,7 +801,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		/*
 		 * BPF_ST NOSPEC (speculation barrier)
 		 */
-		case BPF_ST | BPF_NOSPEC:
+		case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+		case BPF_ST | BPF_NOSPEC_V4:
 			break;
 
 		/*
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0f8048f6dad630b1547cfa9662d569a8a903c693..aa3201923988743299d1710674554b3bcd6b8231 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -727,7 +727,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		/*
 		 * BPF_ST NOSPEC (speculation barrier)
 		 */
-		case BPF_ST | BPF_NOSPEC:
+		case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+		case BPF_ST | BPF_NOSPEC_V4:
 			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
 					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
 				break;
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 529a83b85c1c934791ed8d18e79b9527c73b83fc..daa093a5835411042e0ada3bed24e1135eda387d 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1250,7 +1250,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		break;
 
 	/* speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 
 	case BPF_ST | BPF_MEM | BPF_B:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 3b4cb713e3684aa246899f5e5570ea1592f57aa6..d957afba967b59ac947002fba9c2a3fbf87d9333 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1564,7 +1564,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		break;
 	}
 	/* speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 
 	/* ST: *(size *)(dst + off) = imm */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e507692e51e71e418545ded2350ca9a2c9ee9a43..05c34813403d6605f7e4060ebb47070cbeafbd4e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1244,7 +1244,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	/*
 	 * BPF_NOSPEC (speculation barrier)
 	 */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 	/*
 	 * BPF_ST(X)
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index fa0759bfe498e917b0b215d2a24d1269d645161f..2822249f678bcbedfba7bfceb7228bbf627147a4 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1288,7 +1288,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		break;
 	}
 	/* speculation barrier */
-	case BPF_ST | BPF_NOSPEC:
+	case BPF_ST | BPF_NOSPEC_V1: /* TODO */
+	case BPF_ST | BPF_NOSPEC_V4:
 		break;
 	/* ST: *(size *)(dst + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 438adb695daab16cb60cc9239ce8107796a836d7..f8abf2bb8ab10cf744114e58e2257566dc401c29 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1319,7 +1319,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
 			break;
 
 			/* speculation barrier */
-		case BPF_ST | BPF_NOSPEC:
+		case BPF_ST | BPF_NOSPEC_V1:
+		case BPF_ST | BPF_NOSPEC_V4:
 			EMIT_LFENCE();
 			break;
 
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 429a89c5468b5748c01728193d7eeb82fadf2376..1f6d3b5ca24ece65e6f9c8b45cb9cb7786f058a8 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1902,8 +1902,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			i++;
 			break;
 		}
-		/* speculation barrier */
-		case BPF_ST | BPF_NOSPEC:
+		/* speculation barriers */
+		case BPF_ST | BPF_NOSPEC_V1:
+		case BPF_ST | BPF_NOSPEC_V4:
 			if (boot_cpu_has(X86_FEATURE_XMM2))
 				/* Emit 'lfence' */
 				EMIT3(0x0F, 0xAE, 0xE8);
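Note on the encoding: the three bytes above decode as LFENCE (opcode 0F AE /5; ModRM 0xE8 = mod 11b, reg 101b, rm 000b). LFENCE arrived with SSE2, hence the X86_FEATURE_XMM2 guard; a summary sketch of what this case emits:

	/* 0F AE /5 = LFENCE; ModRM 0xE8 selects /5 (mod 11b, reg 101b, rm 000b).
	 * LFENCE requires SSE2 (X86_FEATURE_XMM2); on pre-SSE2 CPUs nothing is
	 * emitted, so both NOSPEC cases fall through as no-ops there.
	 */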
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b6e58dab8e2756d291970bfcb096d22e914120c5..8b4c9ac9135b89150a3c3b4130fc83e55943980f 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -476,7 +476,9 @@ struct bpf_insn_aux_data {
 	u64 map_key_state; /* constant (32 bit) key tracking for maps */
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
-	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
+	bool nospec_v1; /* do not execute this insn speculatively, a barrier precedes it (Spectre v1) */
+	bool nospec_v1_result; /* do not execute the following insn speculatively, a barrier follows it (Spectre v1) */
+	bool nospec_v4_result; /* subject to Spectre v4 sanitation, a barrier follows this store */
 	bool zext_dst; /* this insn zero extends dst reg */
 	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
 	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f69114083ec71d9db5475b72af452b2aa830c1f8..a383adab9e7358f0e278e72c95ee532157e2387a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -72,10 +72,15 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS	0xe0
 
+/* unused opcode to mark speculation barrier for mitigating
+ * speculative bounds-check bypass and type confusion
+ */
+#define BPF_NOSPEC_V1	0xd0
+
 /* unused opcode to mark speculation barrier for mitigating
  * Speculative Store Bypass
  */
-#define BPF_NOSPEC	0xc0
+#define BPF_NOSPEC_V4	0xc0
 
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
@@ -393,11 +398,21 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 		.off   = 0,					\
 		.imm   = 0 })
 
-/* Speculation barrier */
+/* Spectre v1 speculation barrier */
+
+#define BPF_ST_NOSPEC_V1()					\
+	((struct bpf_insn) {					\
+		.code  = BPF_ST | BPF_NOSPEC_V1,		\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+/* Spectre v4 (Speculative Store Bypass) barrier */
 
-#define BPF_ST_NOSPEC()						\
+#define BPF_ST_NOSPEC_V4()					\
 	((struct bpf_insn) {					\
-		.code  = BPF_ST | BPF_NOSPEC,			\
+		.code  = BPF_ST | BPF_NOSPEC_V4,		\
 		.dst_reg = 0,					\
 		.src_reg = 0,					\
 		.off   = 0,					\
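Both opcodes stay non-UAPI: only the verifier's rewrite passes emit them, through the macros above. A minimal splice sketch, modeled on the BPF_ST_NOSPEC_V4() use in convert_ctx_accesses() further below (env, insn, i and delta are the usual verifier-context names):

	/* append a v4 barrier directly after the store at i + delta */
	struct bpf_insn patch[] = {
		*insn,			/* the original store */
		BPF_ST_NOSPEC_V4(),	/* speculation barrier right after it */
	};
	struct bpf_prog *new_prog;

	new_prog = bpf_patch_insn_data(env, i + delta, patch, ARRAY_SIZE(patch));
	if (!new_prog)
		return -ENOMEM;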
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index e3e45b651cd4052e1a83f46481eccba03758f6cd..1af2959f3aeed9e43202d655eb326122e66b555b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1655,7 +1655,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 		/* Non-UAPI available opcodes. */
 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
-		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
+		[BPF_ST  | BPF_NOSPEC_V1] = &&ST_NOSPEC_V1,
+		[BPF_ST  | BPF_NOSPEC_V4] = &&ST_NOSPEC_V4,
 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1902,15 +1903,17 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 	COND_JMP(s, JSLE, <=)
 #undef COND_JMP
 	/* ST, STX and LDX*/
-	ST_NOSPEC:
-		/* Speculation barrier for mitigating Speculative Store Bypass.
-		 * In case of arm64, we rely on the firmware mitigation as
-		 * controlled via the ssbd kernel parameter. Whenever the
-		 * mitigation is enabled, it works for all of the kernel code
-		 * with no need to provide any additional instructions here.
-		 * In case of x86, we use 'lfence' insn for mitigation. We
-		 * reuse preexisting logic from Spectre v1 mitigation that
-		 * happens to produce the required code on x86 for v4 as well.
+	ST_NOSPEC_V1:
+	ST_NOSPEC_V4:
+		/* Speculation barrier for mitigating Speculative Store Bypass,
+		 * Bounds-Check Bypass, and Type Confusion. In case of arm64, we
+		 * rely on the firmware mitigation as controlled via the ssbd
+		 * kernel parameter. Whenever the mitigation is enabled, it
+		 * works for all of the kernel code with no need to provide any
+		 * additional instructions here. In case of x86, we use 'lfence'
+		 * insn for mitigation. We reuse preexisting logic from Spectre
+		 * v1 mitigation that happens to produce the required code on
+		 * x86 for v4 as well.
 		 */
 		barrier_nospec();
 		CONT;
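Both labels share one implementation because the interpreter itself runs as kernel code, where a single barrier covers v1 and v4 alike. On x86, barrier_nospec() expands roughly to the following (illustrative; the exact definition lives in asm/barrier.h):

	/* patched to a real lfence only on CPUs that support it */
	#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)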
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 85e771de6a32527c15d24970b09fb3cd2ca99486..1505eafadf2d4e9cdb63bdf608a3bc5bd51e55bf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1820,22 +1820,26 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
 	return 0;
 }
 
-static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
-					     int insn_idx, int prev_insn_idx,
-					     bool speculative)
+static int push_stack(struct bpf_verifier_env *env,
+		      int insn_idx, int prev_insn_idx,
+		      bool speculative, struct bpf_verifier_state **branch)
 {
 	struct bpf_verifier_state *cur = env->cur_state;
 	struct bpf_verifier_stack_elem *elem;
 	int err;
 
+	*branch = NULL;
+
 	if (!env->bypass_spec_v1 && cur->speculative && env->stack_size > bpf_spec_v1_complexity_limit_jmp_seq) {
 		verbose(env, "avoiding spec. push_stack()\n");
-		return NULL;
+		return -EACCES;
 	}
 
 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
-	if (!elem)
+	if (!elem) {
+		err = -ENOMEM;
 		goto err;
+	}
 
 	elem->insn_idx = insn_idx;
 	elem->prev_insn_idx = prev_insn_idx;
@@ -1844,12 +1848,15 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 	env->head = elem;
 	env->stack_size++;
 	err = copy_verifier_state(&elem->st, cur);
-	if (err)
+	if (err) {
+		WARN_ON_ONCE(err != -ENOMEM);
 		goto err;
+	}
 	elem->st.speculative |= speculative;
 	if (env->stack_size > bpf_complexity_limit_jmp_seq) {
 		verbose(env, "The sequence of %d jumps is too complex.\n",
 			env->stack_size);
+		err = -EFAULT;
 		goto err;
 	}
 	if (elem->st.parent) {
@@ -1864,13 +1871,14 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 		 * which might have large 'branches' count.
 		 */
 	}
-	return &elem->st;
+	*branch = &elem->st;
+	return 0;
 err:
 	free_verifier_state(env->cur_state, true);
 	env->cur_state = NULL;
 	/* pop all elements and return */
 	while (!pop_stack(env, NULL, NULL, false));
-	return NULL;
+	return err;
 }
 
 #define CALLER_SAVED_REGS 6
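The reworked push_stack() separates the errno (return value) from the branch state (out parameter); on any failure it has already freed cur_state and drained the stack, so callers only propagate the error. Caller sketch, matching the process_iter_next_call() hunk below:

	struct bpf_verifier_state *queued_st;
	int err;

	err = push_stack(env, insn_idx + 1, insn_idx, false, &queued_st);
	if (err)
		return err;	/* -EACCES, -ENOMEM or -EFAULT; state already cleaned up */
	/* on success, queued_st is non-NULL and may be specialized further */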
@@ -4325,7 +4333,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		}
 
 		if (sanitize)
-			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+			env->insn_aux_data[insn_idx].nospec_v4_result = true;
 	}
 
 	err = destroy_if_dynptr_stack_slot(env, state, spi);
@@ -7437,9 +7445,10 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
 
 	if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
 		/* branch out active iter state */
-		queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
-		if (!queued_st)
-			return -ENOMEM;
+		int err;
+
+		err = push_stack(env, insn_idx + 1, insn_idx, false, &queued_st);
+		if (err)
+			return err;
+		BUG_ON(!queued_st);
 
 		queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
 		queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
@@ -11573,10 +11582,13 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 	return 0;
 }
 
-static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+static bool can_skip_alu_sanitation(struct bpf_verifier_env *env,
 				    const struct bpf_insn *insn)
 {
-	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
+	/* When we already had to fall back to emitting a nospec barrier for
+	 * some reason, we no longer need to track the ALU sanitation state
+	 * either.
+	 */
+	WARN_ON_ONCE(cur_aux(env)->alu_state && cur_aux(env)->nospec_v1_result);
+	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K || cur_aux(env)->nospec_v1_result;
 }
 
 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
@@ -11591,6 +11603,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
 		return REASON_PATHS;
 
 	/* Corresponding fixup done in do_misc_fixups(). */
+	WARN_ON_ONCE(aux->nospec_v1_result);
 	aux->alu_state = alu_state;
 	aux->alu_limit = alu_limit;
 	return 0;
@@ -11617,15 +11630,16 @@ struct bpf_sanitize_info {
 	bool mask_to_left;
 };
 
-static struct bpf_verifier_state *
+static int
 sanitize_speculative_path(struct bpf_verifier_env *env,
 			  const struct bpf_insn *insn,
 			  u32 next_idx, u32 curr_idx)
 {
 	struct bpf_verifier_state *branch;
 	struct bpf_reg_state *regs;
+	int err;
 
-	branch = push_stack(env, next_idx, curr_idx, true);
+	err = push_stack(env, next_idx, curr_idx, true, &branch);
 	if (branch && insn) {
 		regs = branch->frame[branch->curframe]->regs;
 		if (BPF_SRC(insn->code) == BPF_K) {
@@ -11635,7 +11649,7 @@ sanitize_speculative_path(struct bpf_verifier_env *env,
 			mark_reg_unknown(env, regs, insn->src_reg);
 		}
 	}
-	return branch;
+	return err;
 }
 
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
@@ -11654,7 +11668,6 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	u8 opcode = BPF_OP(insn->code);
 	u32 alu_state, alu_limit;
 	struct bpf_reg_state tmp;
-	bool ret;
 	int err;
 
 	if (can_skip_alu_sanitation(env, insn))
@@ -11727,11 +11740,11 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		tmp = *dst_reg;
 		copy_register_state(dst_reg, ptr_reg);
 	}
-	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+	err = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
 					env->insn_idx);
-	if (!ptr_is_dst_reg && ret)
+	if (!ptr_is_dst_reg && !err)
 		*dst_reg = tmp;
-	return !ret ? REASON_STACK : 0;
+	return (err == -ENOMEM || err == -EFAULT) ? REASON_STACK : err;
 }
 
 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
@@ -11755,6 +11768,7 @@ static int sanitize_err(struct bpf_verifier_env *env,
 	static const char *err = "pointer arithmetic with it prohibited for !root";
 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
 	u32 dst = insn->dst_reg, src = insn->src_reg;
+	struct bpf_insn_aux_data *aux = cur_aux(env);
 
 	switch (reason) {
 	case REASON_BOUNDS:
@@ -11762,13 +11776,16 @@ static int sanitize_err(struct bpf_verifier_env *env,
 			off_reg == dst_reg ? dst : src, err);
 		break;
 	case REASON_TYPE:
-		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
-			off_reg == dst_reg ? src : dst, err);
-		break;
+		/* Register has pointer with unsupported alu operation. */
+		aux->nospec_v1_result = true;
+		aux->alu_state = 0;
+		return 0;
 	case REASON_PATHS:
-		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
-			dst, op, err);
-		break;
+		/* Tried to perform alu op from different maps, paths or
+		 * scalars.
+		 */
+		aux->nospec_v1_result = true;
+		aux->alu_state = 0;
+		return 0;
 	case REASON_LIMIT:
 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
 			dst, op, err);
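REASON_TYPE and REASON_PATHS thus no longer abort verification: the insn is flagged nospec_v1_result and its alu_state cleared, trading pointer masking for a barrier. Sketched effect on the final program, assuming a companion fixup pass emits the v1 barrier (that pass is not part of this hunk):

	/*
	 *   r2 = r0 + r1   // pointer alu that used to fail with REASON_PATHS
	 *   nospec         // BPF_ST | BPF_NOSPEC_V1 spliced right after it
	 */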
@@ -11802,6 +11819,8 @@ static int check_stack_access_for_ptr_arithmetic(
 				const struct bpf_reg_state *reg,
 				int off)
 {
+	WARN_ON_ONCE(env->bypass_spec_v1);
+
 	if (!tnum_is_const(reg->var_off)) {
 		char tn_buf[48];
 
@@ -11812,9 +11831,8 @@ static int check_stack_access_for_ptr_arithmetic(
 	}
 
 	if (off >= 0 || off < -MAX_BPF_STACK) {
-		verbose(env, "R%d stack pointer arithmetic goes out of range, "
-			"prohibited for !root; off=%d\n", regno, off);
-		return -EACCES;
+		cur_aux(env)->nospec_v1_result = true;
+		cur_aux(env)->alu_state = 0;
 	}
 
 	return 0;
@@ -11840,9 +11858,12 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env,
 		break;
 	case PTR_TO_MAP_VALUE:
 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
-			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
-				"prohibited for !root\n", dst);
-			return -EACCES;
+			WARN_ON_ONCE(BPF_OP(insn->code) != BPF_ADD &&
+				     BPF_OP(insn->code) != BPF_SUB);
+			/* dst pointer arithmetic of map value goes out of
+			 * range, prohibited for !root.
+			 */
+			cur_aux(env)->nospec_v1_result = true;
+			cur_aux(env)->alu_state = 0;
 		}
 		break;
 	default:
@@ -11940,7 +11961,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
 				       &info, false);
 		if (ret < 0)
-			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+			ret = sanitize_err(env, insn, ret, off_reg, dst_reg);
+		if (ret < 0)
+			return ret;
 	}
 
 	switch (opcode) {
@@ -12077,7 +12100,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
 				       &info, true);
 		if (ret < 0)
-			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+			ret = sanitize_err(env, insn, ret, off_reg, dst_reg);
+		if (ret < 0)
+			return ret;
 	}
 
 	return 0;
@@ -12713,7 +12738,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	if (sanitize_needed(opcode)) {
 		ret = sanitize_val_alu(env, insn);
 		if (ret < 0)
-			return sanitize_err(env, insn, ret, NULL, NULL);
+			ret = sanitize_err(env, insn, ret, NULL, NULL);
+		if (ret < 0)
+			return ret;
 	}
 
 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
@@ -13911,10 +13938,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		 * the fall-through branch for simulation under speculative
 		 * execution.
 		 */
-		if (!env->bypass_spec_v1 &&
-		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
-					       *insn_idx))
-			return -EFAULT;
+		if (!env->bypass_spec_v1) {
+			err = sanitize_speculative_path(env, insn, *insn_idx + 1, *insn_idx);
+			if (err)
+				return err;
+		}
 		*insn_idx += insn->off;
 		return 0;
 	} else if (pred == 0) {
@@ -13922,18 +13950,23 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		 * program will go. If needed, push the goto branch for
 		 * simulation under speculative execution.
 		 */
-		if (!env->bypass_spec_v1 &&
-		    !sanitize_speculative_path(env, insn,
-					       *insn_idx + insn->off + 1,
-					       *insn_idx))
-			return -EFAULT;
+		if (!env->bypass_spec_v1) {
+			err = sanitize_speculative_path(env, insn,
+							*insn_idx + insn->off + 1,
+							*insn_idx);
+			if (err)
+				return err;
+		}
 		return 0;
 	}
 
-	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
-				  false);
-	if (!other_branch)
-		return -EFAULT;
+	err = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
+			 false, &other_branch);
+	if (err)
+		return err;
+	/* TODO: handle !err && !other_branch gracefully instead of BUG_ON() */
+	BUG_ON(!other_branch);
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
 
 	/* detect if we are comparing against a constant value so we can adjust
@@ -16505,9 +16538,9 @@ static int do_check(struct bpf_verifier_env *env)
 					continue;
 				} else if (err == ALL_PATHS_CHECKED) {
 					break;
-				} else if (err) {
-					BUG_ON(err > 0);
-					return err;
+				} else {
+					WARN_ON_ONCE(err >= 0);
+					return err < 0 ? err : -EINVAL;
 				}
 			}
 		}
@@ -16562,16 +16595,60 @@ static int do_check(struct bpf_verifier_env *env)
 		sanitize_mark_insn_seen(env);
 		prev_insn_idx = env->insn_idx;
 
+		if (!env->bypass_spec_v1 && state->speculative && cur_aux(env)->nospec_v1) {
+			err = process_bpf_exit(env, &prev_insn_idx, pop_log, &do_print_state);
+			if (err == CHECK_NEXT_INSN) {
+				continue;
+			} else if (err == ALL_PATHS_CHECKED) {
+				break;
+			} else {
+				WARN_ON_ONCE(err != -ENOMEM);
+				verbose(env, "speculative bpf_process_exit() because of nospec_v1 failed");
+				return err;
+			}
+		}
+
 		err = do_check_insn(env, insn, pop_log, &do_print_state, regs, state, &prev_insn_idx);
 		if (err == CHECK_NEXT_INSN) {
 			continue;
 		} else if (err == ALL_PATHS_CHECKED) {
 			break;
+		} else if (state->speculative &&
+			   (err == -EPERM || err == -EACCES || err == -EINVAL)) {
+			WARN_ON_ONCE(env->bypass_spec_v1);
+			BUG_ON(env->cur_state != state);
+
+			/* Terminate this speculative path forcefully. */
+			cur_aux(env)->nospec_v1 = true;
+
+			err = process_bpf_exit(env, &prev_insn_idx, pop_log, &do_print_state);
+			if (err == CHECK_NEXT_INSN) {
+				continue;
+			} else if (err == ALL_PATHS_CHECKED) {
+				break;
+			} else {
+				WARN_ON_ONCE(err != -ENOMEM);
+				verbose(env, "speculative bpf_process_exit() because of do_check_insn() failure failed");
+				return err;
+			}
 		} else if (err) {
-			BUG_ON(err > 0);
+			WARN_ON_ONCE(err > 0);
 			return err;
 		}
 
+		if (!env->bypass_spec_v1 && state->speculative && cur_aux(env)->nospec_v1_result) {
+			err = process_bpf_exit(env, &prev_insn_idx, pop_log, &do_print_state);
+			if (err == CHECK_NEXT_INSN) {
+				continue;
+			} else if (err == ALL_PATHS_CHECKED) {
+				break;
+			} else {
+				WARN_ON_ONCE(err != -ENOMEM);
+				verbose(env, "speculative bpf_process_exit() because of nospec_v1_result failed");
+				return err;
+			}
+		}
+
 		env->insn_idx++;
 	}
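Condensed, do_check() now has three speculative-path exits, all funneled through process_bpf_exit() so a path is abandoned exactly where a barrier will sit:

	/* per-insn loop on a speculative path (bypass_spec_v1 unset):
	 *
	 *   if (aux->nospec_v1)                 pop path: barrier precedes insn
	 *   err = do_check_insn(...);
	 *   if (err is -EPERM/-EACCES/-EINVAL)
	 *       aux->nospec_v1 = true;          pop path: insn sits behind a
	 *                                       barrier, so the error is moot
	 *   if (aux->nospec_v1_result)          pop path: barrier follows insn
	 */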
 
@@ -17573,10 +17650,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		}
 
 		if (type == BPF_WRITE &&
-		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
+		    env->insn_aux_data[i + delta].nospec_v4_result) {
 			struct bpf_insn patch[] = {
 				*insn,
-				BPF_ST_NOSPEC(),
+				BPF_ST_NOSPEC_V4(),
 			};
 
 			cnt = ARRAY_SIZE(patch);
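The rewrite leaves every sanitized stack spill immediately followed by a v4 barrier, which the JITs above then lower (lfence on x86, a no-op where SSB is already mitigated globally):

	/*
	 *   *(u64 *)(r10 - 8) = r1     // the sanitized stack spill
	 *   nospec_v4                  // BPF_ST | BPF_NOSPEC_V4
	 */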
@@ -18196,6 +18273,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			if (!aux->alu_state ||
 			    aux->alu_state == BPF_ALU_NON_POINTER)
 				continue;
+			WARN_ON_ONCE(aux->nospec_v1_result);
 
 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==