#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
-#include <htmintrin.h>
+#include <htm.h>
#include <elision-conf.h>
#define aconf __elision_aconf
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
- __asm__ __volatile__ (".machinemode \"zarch_nohighgprs\"\n\t"
- ".machine \"all\""
- : : : "memory");
-
/* Implement POSIX semantics by forbiding nesting elided trylocks.
Sorry. After the abort the code is re-executed
non transactional and if the lock was already locked
return an error. */
- if (__builtin_tx_nesting_depth () > 0)
+ if (__libc_tx_nesting_depth () > 0)
{
/* Note that this abort may terminate an outermost transaction that
was created outside glibc.
them to use the default lock instead of retrying transactions
until their try_tbegin is zero.
*/
- __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
+ __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
}
/* Only try a transaction if it's worth it. See __lll_lock_elision for
just a hint. */
if (atomic_load_relaxed (adapt_count) <= 0)
{
- unsigned status;
+ int status;
if (__builtin_expect
- ((status = __builtin_tbegin ((void *)0)) == _HTM_TBEGIN_STARTED, 1))
+ ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
{
if (*futex == 0)
return 0;
/* Lock was busy. Fall back to normal locking. */
/* Since we are in a non-nested transaction there is no need to abort,
which is expensive. */
- __builtin_tend ();
+ __libc_tend ();
/* Note: Changing the adapt_count here might abort a transaction on a
different cpu, but that could happen anyway when the futex is
acquired, so there's no need to check the nesting depth here.
--- /dev/null
+/* Shared HTM header. Work around false transactional execution facility
+ intrinsics.
+
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _HTM_H
+#define _HTM_H 1
+
+#include <htmintrin.h>
+
+#ifdef __s390x__
+# define TX_FPRS_BYTES 64
+# define TX_SAVE_FPRS \
+ " std %%f8, 0(%[R_FPRS])\n\t" \
+ " std %%f9, 8(%[R_FPRS])\n\t" \
+ " std %%f10, 16(%[R_FPRS])\n\t" \
+ " std %%f11, 24(%[R_FPRS])\n\t" \
+ " std %%f12, 32(%[R_FPRS])\n\t" \
+ " std %%f13, 40(%[R_FPRS])\n\t" \
+ " std %%f14, 48(%[R_FPRS])\n\t" \
+ " std %%f15, 56(%[R_FPRS])\n\t"
+
+# define TX_RESTORE_FPRS \
+ " ld %%f8, 0(%[R_FPRS])\n\t" \
+ " ld %%f9, 8(%[R_FPRS])\n\t" \
+ " ld %%f10, 16(%[R_FPRS])\n\t" \
+ " ld %%f11, 24(%[R_FPRS])\n\t" \
+ " ld %%f12, 32(%[R_FPRS])\n\t" \
+ " ld %%f13, 40(%[R_FPRS])\n\t" \
+ " ld %%f14, 48(%[R_FPRS])\n\t" \
+ " ld %%f15, 56(%[R_FPRS])\n\t"
+
+#else
+
+# define TX_FPRS_BYTES 16
+# define TX_SAVE_FPRS \
+ " std %%f4, 0(%[R_FPRS])\n\t" \
+ " std %%f6, 8(%[R_FPRS])\n\t"
+
+# define TX_RESTORE_FPRS \
+ " ld %%f4, 0(%[R_FPRS])\n\t" \
+ " ld %%f6, 8(%[R_FPRS])\n\t"
+
+#endif /* ! __s390x__ */
+
+/* Use own inline assembly instead of __builtin_tbegin, as tbegin
+ has to filter program interruptions which can't be done with the builtin.
+ Now the fprs have to be saved / restored here, too.
+ The fpc is also not saved / restored with the builtin.
+ The used inline assembly does not clobber the volatile fprs / vrs!
+ Clobbering the latter ones would force the compiler to save / restore
+ the call saved fprs as those overlap with the vrs, but they only need to be
+ restored if the transaction fails but not if the transaction is successfully
+ started. Thus the user of the tbegin macros in this header file has to
+ compile the file / function with -msoft-float. It prevents gcc from using
+ fprs / vrs. */
+#define __libc_tbegin(tdb) \
+ ({ int __ret; \
+ int __fpc; \
+ char __fprs[TX_FPRS_BYTES]; \
+ __asm__ __volatile__ (".machine push\n\t" \
+ ".machinemode \"zarch_nohighgprs\"\n\t" \
+ ".machine \"all\"\n\t" \
+ /* Save state at the outermost transaction. \
+ As extracting nesting depth is expensive \
+ on at least zEC12, save fprs at inner \
+ transactions, too. \
+ The fpc and fprs are saved here as they \
+ are not saved by tbegin. There exist no \
+ call-saved vrs, thus they are not saved \
+ here. */ \
+ " efpc %[R_FPC]\n\t" \
+ TX_SAVE_FPRS \
+ /* Begin transaction: save all gprs, allow \
+ ar modification and fp operations. Some \
+ program-interruptions (e.g. a null \
+ pointer access) are filtered and the \
+ transaction will abort. In this case \
+ the normal lock path will execute it \
+ again and result in a core dump which does \
+ not show at tbegin but the real executed \
+ instruction. */ \
+ " tbegin 0, 0xFF0E\n\t" \
+ /* Branch away in abort case (this is the \
+ preferred sequence. See PoP in chapter 5 \
+ Transactional-Execution Facility \
+ Operation). */ \
+ " jnz 0f\n\t" \
+ /* Transaction has successfully started. */ \
+ " lhi %[R_RET], 0\n\t" \
+ " j 1f\n\t" \
+ /* Transaction has aborted. Now we are at \
+ the outermost transaction. Restore fprs \
+ and fpc. */ \
+ "0: ipm %[R_RET]\n\t" \
+ " srl %[R_RET], 28\n\t" \
+ " sfpc %[R_FPC]\n\t" \
+ TX_RESTORE_FPRS \
+ "1:\n\t" \
+ ".machine pop\n" \
+ : [R_RET] "=&d" (__ret), \
+ [R_FPC] "=&d" (__fpc) \
+ : [R_FPRS] "a" (__fprs) \
+ : "cc", "memory"); \
+ __ret; \
+ })
+
+/* These builtins are correct. Use them. */
+#define __libc_tend() \
+ ({ __asm__ __volatile__ (".machine push\n\t" \
+ ".machinemode \"zarch_nohighgprs\"\n\t" \
+ ".machine \"all\"\n\t"); \
+ int __ret = __builtin_tend (); \
+ __asm__ __volatile__ (".machine pop"); \
+ __ret; \
+ })
+
+#define __libc_tabort(abortcode) \
+ __asm__ __volatile__ (".machine push\n\t" \
+ ".machinemode \"zarch_nohighgprs\"\n\t" \
+ ".machine \"all\"\n\t"); \
+ __builtin_tabort (abortcode); \
+ __asm__ __volatile__ (".machine pop")
+
+#define __libc_tx_nesting_depth() \
+ ({ __asm__ __volatile__ (".machine push\n\t" \
+ ".machinemode \"zarch_nohighgprs\"\n\t" \
+ ".machine \"all\"\n\t"); \
+ int __ret = __builtin_tx_nesting_depth (); \
+ __asm__ __volatile__ (".machine pop"); \
+ __ret; \
+ })
+
+#endif