From c928fce274ac0a51ad814437e18a0c07fa6dbca3 Mon Sep 17 00:00:00 2001
From: Mark Mentovai <mark@mentovai.com>
Date: Sat, 28 Sep 2024 00:30:59 -0400
Subject: [PATCH] xray: allow xray_trampoline_x86_64.S to build with Xcode 16
 clang
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is a backport of c57c7b7c9960 to llvm-12, adapted because that
patch does not apply cleanly without b46c89892fe2, which llvm-12 does
not include. The original change’s description:

> [PATCH] [xray] Use L* instead of .L* for Mach-O
>
> Note: Mach-O support is not yet done and check-xray is not allowed yet.
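
For reference, LOCAL_LABEL is a preprocessor macro that the trampoline
source picks up from compiler-rt's lib/builtins/assembly.h. A minimal
sketch of the relevant definition (not a verbatim copy of the header,
which also defines HIDDEN and other directives): Mach-O assemblers
treat symbols beginning with "L" as assembler-local temporaries,
whereas ELF assemblers use the ".L" prefix, so the macro selects the
prefix per object format:

    /* Sketch: pick the assembler-local label prefix for the target
       object format. Mach-O uses "L", ELF uses ".L". */
    #if defined(__APPLE__)
    #define LOCAL_LABEL(name) L_##name
    #else
    #define LOCAL_LABEL(name) .L_##name
    #endif

With this, LOCAL_LABEL(tmp0) expands to L_tmp0 on Mach-O targets and
.L_tmp0 elsewhere, keeping the branch targets assembler-local on both,
which is what lets the file build with Xcode 16's clang.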
---
 compiler-rt/lib/xray/xray_trampoline_x86_64.S | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/compiler-rt/lib/xray/xray_trampoline_x86_64.S b/compiler-rt/lib/xray/xray_trampoline_x86_64.S
index 12c5a6ccd9a4..af8ad32c6e3e 100644
--- a/compiler-rt/lib/xray/xray_trampoline_x86_64.S
+++ b/compiler-rt/lib/xray/xray_trampoline_x86_64.S
@@ -110,14 +110,14 @@ ASM_SYMBOL(__xray_FunctionEntry):
 	// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
 	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq	%rax, %rax
-	je	.Ltmp0
+	je	LOCAL_LABEL(tmp0)
 
 	// The patched function prologue puts its xray_instr_map index into %r10d.
 	movl	%r10d, %edi
 	xor	%esi,%esi
 	ALIGNED_CALL_RAX
 
-.Ltmp0:
+LOCAL_LABEL(tmp0):
 	RESTORE_REGISTERS
 	retq
 # LLVM-MCA-END
@@ -145,13 +145,13 @@ ASM_SYMBOL(__xray_FunctionExit):
 	movq	%rdx, 0(%rsp)
 	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq %rax,%rax
-	je	.Ltmp2
+	je	LOCAL_LABEL(tmp2)
 
 	movl	%r10d, %edi
 	movl	$1, %esi
   ALIGNED_CALL_RAX
 
-.Ltmp2:
+LOCAL_LABEL(tmp2):
 	// Restore the important registers.
 	movq  48(%rsp), %rbp
 	movupd	32(%rsp), %xmm0
@@ -178,14 +178,14 @@ ASM_SYMBOL(__xray_FunctionTailExit):
 
 	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq %rax,%rax
-	je	.Ltmp4
+	je	LOCAL_LABEL(tmp4)
 
 	movl	%r10d, %edi
 	movl	$2, %esi
 
   ALIGNED_CALL_RAX
 
-.Ltmp4:
+LOCAL_LABEL(tmp4):
 	RESTORE_REGISTERS
 	retq
 # LLVM-MCA-END
@@ -206,14 +206,14 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
 	// Again, these function pointer loads must be atomic; MOV is fine.
 	movq	ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
 	testq	%rax, %rax
-	jne	.Larg1entryLog
+	jne	LOCAL_LABEL(arg1entryLog)
 
 	// If [arg1 logging handler] not set, defer to no-arg logging.
 	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq	%rax, %rax
-	je	.Larg1entryFail
+	je	LOCAL_LABEL(arg1entryFail)
 
-.Larg1entryLog:
+LOCAL_LABEL(arg1entryLog):
 
 	// First argument will become the third
 	movq	%rdi, %rdx
@@ -225,7 +225,7 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
 	movl	%r10d, %edi
 	ALIGNED_CALL_RAX
 
-.Larg1entryFail:
+LOCAL_LABEL(arg1entryFail):
 	RESTORE_REGISTERS
 	retq
 # LLVM-MCA-END
@@ -247,11 +247,11 @@ ASM_SYMBOL(__xray_CustomEvent):
 	// already.
 	movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
 	testq %rax,%rax
-	je .LcustomEventCleanup
+	je LOCAL_LABEL(customEventCleanup)
 
 	ALIGNED_CALL_RAX
 
-.LcustomEventCleanup:
+LOCAL_LABEL(customEventCleanup):
 	RESTORE_REGISTERS
 	retq
 # LLVM-MCA-END
@@ -273,11 +273,11 @@ ASM_SYMBOL(__xray_TypedEvent):
 	// and rdx without our intervention.
 	movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
 	testq %rax,%rax
-	je .LtypedEventCleanup
+	je LOCAL_LABEL(typedEventCleanup)
 
 	ALIGNED_CALL_RAX
 
-.LtypedEventCleanup:
+LOCAL_LABEL(typedEventCleanup):
 	RESTORE_REGISTERS
 	retq
 # LLVM-MCA-END
-- 
2.46.2
