
Text file src/internal/runtime/atomic/atomic_arm64.s

Documentation: internal/runtime/atomic

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

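// This file implements the arm64 entry points for the
// internal/runtime/atomic package. Read-modify-write operations use
// the ARMv8.1 LSE instructions (SWPAL*, CASAL*, LDADDAL*, LDCLRAL*,
// LDORAL*) when available. Unless the build guarantees LSE
// (GOARM64_LSE), each of them also carries an LDAXR/STLXR fallback
// loop, selected at run time through the HasATOMICS CPU feature bit.
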
#include "go_asm.h"
#include "textflag.h"

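// The typed variants below are thin aliases: each tail-jumps (B) to
// the correspondingly sized implementation further down in this file.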
TEXT ·Casint32(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	B	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	B	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

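// Casp1 is the pointer compare-and-swap; it performs no write
// barrier itself, so callers (the runtime's atomic_pointer.go
// wrappers) are responsible for any GC barriers.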
TEXT ·Casp1(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

// uint32 ·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 ·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 ·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

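// LDARW/LDAR already carry acquire semantics, so the acquire-ordered
// load variants below simply alias the plain loads.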
// uint32 ·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* addr)
TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
	B	·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* addr)
TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
	B	·Load64(SB)

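// StorepNoWB performs *ptr = val atomically, without a write barrier.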
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	B	·Store64(SB)

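// Likewise, the plain stores below use STLRW/STLR (store-release), so
// the release-ordered variants alias them.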
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

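// void ·Store(uint32 volatile* addr, uint32 v)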
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

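// void ·Store8(uint8 volatile* addr, uint8 v)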
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

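// void ·Store64(uint64 volatile* addr, uint64 v)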
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

// uint8 Xchg8(ptr *uint8, new uint8)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVB	new+8(FP), R1
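	// When the build does not guarantee LSE atomics (GOARM64_LSE
	// unset), test the CPU's HasATOMICS bit at run time and fall back
	// to a load-exclusive/store-exclusive loop on cores without LSE.
	// The same pattern repeats in every read-modify-write routine below.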
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	SWPALB	R1, (R0), R2
	MOVB	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	STLXRB	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVB	R2, ret+16(FP)
	RET
#endif

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	SWPALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	SWPALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVW	old+8(FP), R1
	MOVW	new+12(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
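	// CASALW compares (R0) with R3 and stores R2 if they match; in
	// either case R3 receives the value that was in memory, so
	// comparing it against the expected old value yields the result.
	// The old value is copied into R3 first because CASALW
	// overwrites its first operand.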
	MOVD	R1, R3
	CASALW	R3, (R0), R2
	CMP 	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R3
	CMPW	R1, R3
	BNE	ok
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#endif

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	MOVD	R1, R3
	CASALD	R3, (R0), R2
	CMP 	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#endif

// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
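	// LDADDALW leaves the old value in R2, but Xadd returns the new
	// value, so the delta is added once more before returning.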
	LDADDALW	R1, (R0), R2
	ADD 	R1, R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	LDADDALD	R1, (R0), R2
	ADD 	R1, R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	B	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

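// void And8(ptr *uint8, val uint8)
// Atomically:
//	*ptr &= val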
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
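	// LSE has no atomic-AND instruction. Complement the mask and use
	// LDCLR (atomic bit clear) instead: x & val == x &^ ^val. The
	// same trick appears in And, And32, and And64 below.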
	MVN 	R1, R2
	LDCLRALB	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

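// void Or8(ptr *uint8, val uint8)
// Atomically:
//	*ptr |= val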
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	LDORALB	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	MVN 	R1, R2
	LDCLRALW	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or32(ptr *uint32, val uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// func And32(ptr *uint32, val uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	MVN 	R1, R2
	LDCLRALW	R2, (R0), R3
	MOVW	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// func Or64(ptr *uint64, val uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	LDORALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ORR	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func And64(ptr *uint64, val uint64) old uint64
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ 	R4, load_store_loop
#endif
	MVN 	R1, R2
	LDCLRALD	R2, (R0), R3
	MOVD	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	AND	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Anduintptr(ptr *uintptr, val uintptr) old uintptr
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	B	·And64(SB)

// func Oruintptr(ptr *uintptr, val uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	B	·Or64(SB)
