@@ -66,7 +66,7 @@ enum MemoryOrder
6666 * Returns:
6767 * The value of 'val'.
6868 */
69- T atomicLoad (MemoryOrder ms = MemoryOrder.seq, T)(ref const T val) pure nothrow @nogc @trusted
69+ T atomicLoad (MemoryOrder ms = MemoryOrder.seq, T)(return ref const T val) pure nothrow @nogc @trusted
7070 if (! is (T == shared U, U) && ! is (T == shared inout U, U) && ! is (T == shared const U, U))
7171{
7272 static if (__traits(isFloating, T))
@@ -80,7 +80,7 @@ T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref const T val) pure nothrow
8080}
8181
8282/// Ditto
83- T atomicLoad (MemoryOrder ms = MemoryOrder.seq, T)(ref shared const T val) pure nothrow @nogc @trusted
83+ T atomicLoad (MemoryOrder ms = MemoryOrder.seq, T)(return ref shared const T val) pure nothrow @nogc @trusted
8484 if (! hasUnsharedIndirections! T)
8585{
8686 import core.internal.traits : hasUnsharedIndirections;
@@ -90,7 +90,7 @@ T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref shared const T val) pure n
9090}
9191
9292/// Ditto
93- TailShared! T atomicLoad (MemoryOrder ms = MemoryOrder.seq, T)(ref shared const T val) pure nothrow @nogc @trusted
93+ TailShared! T atomicLoad (MemoryOrder ms = MemoryOrder.seq, T)(return ref shared const T val) pure nothrow @nogc @trusted
9494 if (hasUnsharedIndirections! T)
9595{
9696 // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
@@ -162,7 +162,7 @@ void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, share
162162 * Returns:
163163 * The value held previously by `val`.
164164 */
165- T atomicFetchAdd (MemoryOrder ms = MemoryOrder.seq, T)(ref T val, size_t mod) pure nothrow @nogc @trusted
165+ T atomicFetchAdd (MemoryOrder ms = MemoryOrder.seq, T)(return ref T val, size_t mod) pure nothrow @nogc @trusted
166166 if ((__traits(isIntegral, T) || is (T == U* , U)) && ! is (T == shared ))
167167in (atomicValueIsProperlyAligned(val))
168168{
@@ -173,7 +173,7 @@ in (atomicValueIsProperlyAligned(val))
173173}
174174
175175/// Ditto
176- T atomicFetchAdd (MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val, size_t mod) pure nothrow @nogc @trusted
176+ T atomicFetchAdd (MemoryOrder ms = MemoryOrder.seq, T)(return ref shared T val, size_t mod) pure nothrow @nogc @trusted
177177 if (__traits(isIntegral, T) || is (T == U* , U))
178178in (atomicValueIsProperlyAligned(val))
179179{
@@ -191,7 +191,7 @@ in (atomicValueIsProperlyAligned(val))
191191 * Returns:
192192 * The value held previously by `val`.
193193 */
194- T atomicFetchSub (MemoryOrder ms = MemoryOrder.seq, T)(ref T val, size_t mod) pure nothrow @nogc @trusted
194+ T atomicFetchSub (MemoryOrder ms = MemoryOrder.seq, T)(return ref T val, size_t mod) pure nothrow @nogc @trusted
195195 if ((__traits(isIntegral, T) || is (T == U* , U)) && ! is (T == shared ))
196196in (atomicValueIsProperlyAligned(val))
197197{
@@ -202,7 +202,7 @@ in (atomicValueIsProperlyAligned(val))
202202}
203203
204204/// Ditto
205- T atomicFetchSub (MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val, size_t mod) pure nothrow @nogc @trusted
205+ T atomicFetchSub (MemoryOrder ms = MemoryOrder.seq, T)(return ref shared T val, size_t mod) pure nothrow @nogc @trusted
206206 if (__traits(isIntegral, T) || is (T == U* , U))
207207in (atomicValueIsProperlyAligned(val))
208208{
0 commit comments