+2014-02-10 Ondřej Bílka <neleai@seznam.cz>
+
+ * assert/assert.c (__assert_fail_base): Use __glibc_likely instead of __builtin_expect.
+ * benchtests/bench-memmem.c (simple_memmem): Likewise.
+ * catgets/open_catalog.c (__open_catalog): Likewise.
+ * csu/libc-start.c (LIBC_START_MAIN): Likewise.
+ * debug/confstr_chk.c: Likewise.
+ * debug/fread_chk.c (__fread_chk): Likewise.
+ * debug/fread_u_chk.c (__fread_unlocked_chk): Likewise.
+ * debug/getgroups_chk.c: Likewise.
+ * debug/mbsnrtowcs_chk.c: Likewise.
+ * debug/mbsrtowcs_chk.c: Likewise.
+ * debug/mbstowcs_chk.c: Likewise.
+ * debug/memcpy_chk.c: Likewise.
+ * debug/memmove_chk.c: Likewise.
+ * debug/mempcpy_chk.c: Likewise.
+ * debug/memset_chk.c: Likewise.
+ * debug/stpcpy_chk.c (__stpcpy_chk): Likewise.
+ * debug/strcat_chk.c (__strcat_chk): Likewise.
+ * debug/strcpy_chk.c (__strcpy_chk): Likewise.
+ * debug/strncat_chk.c (__strncat_chk): Likewise.
+ * debug/vsnprintf_chk.c (___vsnprintf_chk): Likewise.
+ * debug/vswprintf_chk.c (__vswprintf_chk): Likewise.
+ * debug/wcpcpy_chk.c (__wcpcpy_chk): Likewise.
+ * debug/wcpncpy_chk.c: Likewise.
+ * debug/wcscat_chk.c (__wcscat_chk): Likewise.
+ * debug/wcscpy_chk.c (__wcscpy_chk): Likewise.
+ * debug/wcsncat_chk.c (__wcsncat_chk): Likewise.
+ * debug/wcsncpy_chk.c: Likewise.
+ * debug/wcsnrtombs_chk.c: Likewise.
+ * debug/wcsrtombs_chk.c: Likewise.
+ * debug/wcstombs_chk.c: Likewise.
+ * debug/wmemcpy_chk.c: Likewise.
+ * debug/wmemmove_chk.c: Likewise.
+ * debug/wmempcpy_chk.c: Likewise.
+ * debug/wmemset_chk.c: Likewise.
+ * dirent/scandirat.c (SCANDIRAT): Likewise.
+ * dlfcn/dladdr1.c (dladdr1): Likewise.
+ * dlfcn/dladdr.c (dladdr): Likewise.
+ * dlfcn/dlclose.c (dlclose_doit): Likewise.
+ * dlfcn/dlerror.c (__dlerror): Likewise.
+ * dlfcn/dlinfo.c (dlinfo_doit): Likewise.
+ * dlfcn/dlmopen.c (dlmopen_doit): Likewise.
+ * dlfcn/dlopen.c (dlopen_doit): Likewise.
+ * dlfcn/dlopenold.c (__dlopen_nocheck): Likewise.
+ * dlfcn/dlsym.c (dlsym_doit): Likewise.
+ * dlfcn/dlvsym.c (dlvsym_doit): Likewise.
+ * elf/dl-cache.c (_dl_load_cache_lookup): Likewise.
+ * elf/dl-close.c (remove_slotinfo, _dl_close_worker, _dl_close):
+ Likewise.
+ * elf/dl-conflict.c: Likewise.
+ * elf/dl-deps.c (_dl_build_local_scope, _dl_map_object_deps): Likewise.
+ * elf/dl-dst.h: Likewise.
+ * elf/dl-fini.c (_dl_sort_fini, _dl_fini): Likewise.
+ * elf/dl-fptr.c (_dl_make_fptr): Likewise.
+ * elf/dl-hwcaps.c (_dl_important_hwcaps): Likewise.
+ * elf/dl-init.c (call_init, _dl_init): Likewise.
+ * elf/dl-libc.c (__libc_dlopen_mode, __libc_dlsym): Likewise.
+ * elf/dl-load.c (_dl_dst_substitute, fillin_rpath, _dl_init_paths,
+ _dl_map_object_from_fd, open_verify, open_path,
+ _dl_map_object): Likewise.
+ * elf/dl-lookup.c (do_lookup_x, add_dependency, _dl_lookup_symbol_x):
+ Likewise.
+ * elf/dl-minimal.c (__libc_memalign): Likewise.
+ * elf/dl-open.c (add_to_global, dl_open_worker, _dl_open): Likewise.
+ * elf/dl-reloc.c (_dl_relocate_object): Likewise.
+ * elf/dl-runtime.c (_dl_fixup, _dl_profile_fixup): Likewise.
+ * elf/dl-sym.c (do_sym): Likewise.
+ * elf/dl-tls.c (tls_get_addr_tail, update_get_addr, __tls_get_addr,
+ _dl_tls_get_addr_soft): Likewise.
+ * elf/dl-version.c (match_symbol, _dl_check_map_versions): Likewise.
+ * elf/dl-writev.h (_dl_writev): Likewise.
+ * elf/ldconfig.c (search_dir): Likewise.
+ * elf/rtld.c (_dl_start_final, _dl_start, init_tls, do_preload,
+ dl_main): Likewise.
+ * elf/setup-vdso.h (setup_vdso): Likewise.
+ * grp/compat-initgroups.c (compat_call): Likewise.
+ * grp/fgetgrent.c (fgetgrent): Likewise.
+ * grp/initgroups.c (getgrouplist, initgroups): Likewise.
+ * grp/putgrent.c (putgrent): Likewise.
+ * hesiod/nss_hesiod/hesiod-grp.c (_nss_hesiod_initgroups_dyn):
+ Likewise.
+ * hurd/hurdinit.c: Likewise.
+ * iconvdata/8bit-gap.c (struct): Likewise.
+ * iconvdata/ansi_x3.110.c: Likewise.
+ * iconvdata/big5.c: Likewise.
+ * iconvdata/big5hkscs.c: Likewise.
+ * iconvdata/cp1255.c: Likewise.
+ * iconvdata/cp1258.c: Likewise.
+ * iconvdata/cp932.c: Likewise.
+ * iconvdata/euc-cn.c: Likewise.
+ * iconvdata/euc-jisx0213.c: Likewise.
+ * iconvdata/euc-jp.c: Likewise.
+ * iconvdata/euc-jp-ms.c: Likewise.
+ * iconvdata/euc-kr.c (euckr_from_ucs4): Likewise.
+ * iconvdata/gb18030.c: Likewise.
+ * iconvdata/gbbig5.c (const): Likewise.
+ * iconvdata/gbgbk.c: Likewise.
+ * iconvdata/gbk.c: Likewise.
+ * iconvdata/ibm1364.c: Likewise.
+ * iconvdata/ibm930.c: Likewise.
+ * iconvdata/ibm932.c: Likewise.
+ * iconvdata/ibm933.c: Likewise.
+ * iconvdata/ibm935.c: Likewise.
+ * iconvdata/ibm937.c: Likewise.
+ * iconvdata/ibm939.c: Likewise.
+ * iconvdata/ibm943.c: Likewise.
+ * iconvdata/iso_11548-1.c: Likewise.
+ * iconvdata/iso-2022-cn.c: Likewise.
+ * iconvdata/iso-2022-cn-ext.c: Likewise.
+ * iconvdata/iso-2022-jp-3.c: Likewise.
+ * iconvdata/iso-2022-jp.c (gconv_end): Likewise.
+ * iconvdata/iso-2022-kr.c: Likewise.
+ * iconvdata/iso646.c (gconv_end): Likewise.
+ * iconvdata/iso_6937-2.c: Likewise.
+ * iconvdata/iso_6937.c: Likewise.
+ * iconvdata/iso8859-1.c: Likewise.
+ * iconvdata/johab.c (johab_sym_hanja_to_ucs): Likewise.
+ * iconvdata/shift_jisx0213.c: Likewise.
+ * iconvdata/sjis.c: Likewise.
+ * iconvdata/t.61.c: Likewise.
+ * iconvdata/tcvn5712-1.c: Likewise.
+ * iconvdata/tscii.c: Likewise.
+ * iconvdata/uhc.c: Likewise.
+ * iconvdata/unicode.c (gconv_end): Likewise.
+ * iconvdata/utf-16.c (gconv_end): Likewise.
+ * iconvdata/utf-32.c (gconv_end): Likewise.
+ * iconvdata/utf-7.c (base64): Likewise.
+ * iconv/gconv_cache.c (__gconv_load_cache): Likewise.
+ * iconv/gconv_close.c (__gconv_close): Likewise.
+ * iconv/gconv_open.c (__gconv_open): Likewise.
+ * iconv/gconv_simple.c (internal_ucs4_loop_single, ucs4_internal_loop,
+ ucs4_internal_loop_unaligned, ucs4_internal_loop_single,
+ internal_ucs4le_loop_single, ucs4le_internal_loop,
+ ucs4le_internal_loop_unaligned, ucs4le_internal_loop_single): Likewise.
+ * iconv/iconv.c (iconv): Likewise.
+ * iconv/iconv_close.c: Likewise.
+ * iconv/loop.c (SINGLE): Likewise.
+ * iconv/skeleton.c (FUNCTION_NAME): Likewise.
+ * include/atomic.h: Likewise.
+ * inet/inet6_option.c (option_alloc): Likewise.
+ * intl/bindtextdom.c (set_binding_values): Likewise.
+ * intl/dcigettext.c (DCIGETTEXT, _nl_find_msg): Likewise.
+ * intl/loadmsgcat.c (_nl_load_domain): Likewise.
+ * intl/localealias.c (read_alias_file): Likewise.
+ * libio/filedoalloc.c (_IO_file_doallocate): Likewise.
+ * libio/fileops.c (_IO_file_open, _IO_file_underflow_mmap,
+ _IO_new_file_overflow, _IO_file_xsgetn_mmap): Likewise.
+ * libio/fmemopen.c (fmemopen): Likewise.
+ * libio/iofgets.c (_IO_fgets): Likewise.
+ * libio/iofgets_u.c (fgets_unlocked): Likewise.
+ * libio/iofgetws.c (fgetws): Likewise.
+ * libio/iofgetws_u.c (fgetws_unlocked): Likewise.
+ * libio/iogetdelim.c (_IO_getdelim): Likewise.
+ * libio/wfileops.c (_IO_wfile_underflow, _IO_wfile_underflow_mmap,
+ adjust_wide_data, _IO_wfile_seekoff): Likewise.
+ * locale/findlocale.c (_nl_find_locale): Likewise.
+ * locale/loadarchive.c (_nl_load_locale_from_archive): Likewise.
+ * locale/loadlocale.c (_nl_intern_locale_data, _nl_load_locale):
+ Likewise.
+ * locale/setlocale.c (setlocale): Likewise.
+ * login/programs/pt_chown.c (main): Likewise.
+ * malloc/arena.c (ptmalloc_init, shrink_heap, arena_get2): Likewise.
+ * malloc/malloc.c (_int_malloc, _int_free): Likewise.
+ * malloc/memusage.c (update_data, malloc, realloc, calloc, free,
+ mmap, mmap64, mremap, munmap): Likewise.
+ * math/e_exp2l.c: Likewise.
+ * math/e_scalb.c (invalid_fn, __ieee754_scalb): Likewise.
+ * math/e_scalbf.c (invalid_fn, __ieee754_scalbf): Likewise.
+ * math/e_scalbl.c (invalid_fn, __ieee754_scalbl): Likewise.
+ * math/s_catan.c (__catan): Likewise.
+ * math/s_catanf.c (__catanf): Likewise.
+ * math/s_catanh.c (__catanh): Likewise.
+ * math/s_catanhf.c (__catanhf): Likewise.
+ * math/s_catanhl.c (__catanhl): Likewise.
+ * math/s_catanl.c (__catanl): Likewise.
+ * math/s_ccosh.c (__ccosh): Likewise.
+ * math/s_ccoshf.c (__ccoshf): Likewise.
+ * math/s_ccoshl.c (__ccoshl): Likewise.
+ * math/s_cexp.c (__cexp): Likewise.
+ * math/s_cexpf.c (__cexpf): Likewise.
+ * math/s_cexpl.c (__cexpl): Likewise.
+ * math/s_clog10.c (__clog10): Likewise.
+ * math/s_clog10f.c (__clog10f): Likewise.
+ * math/s_clog10l.c (__clog10l): Likewise.
+ * math/s_clog.c (__clog): Likewise.
+ * math/s_clogf.c (__clogf): Likewise.
+ * math/s_clogl.c (__clogl): Likewise.
+ * math/s_csin.c (__csin): Likewise.
+ * math/s_csinf.c (__csinf): Likewise.
+ * math/s_csinh.c (__csinh): Likewise.
+ * math/s_csinhf.c (__csinhf): Likewise.
+ * math/s_csinhl.c (__csinhl): Likewise.
+ * math/s_csinl.c (__csinl): Likewise.
+ * math/s_csqrt.c (__csqrt): Likewise.
+ * math/s_csqrtf.c (__csqrtf): Likewise.
+ * math/s_csqrtl.c (__csqrtl): Likewise.
+ * math/s_ctan.c (__ctan): Likewise.
+ * math/s_ctanf.c (__ctanf): Likewise.
+ * math/s_ctanh.c (__ctanh): Likewise.
+ * math/s_ctanhf.c (__ctanhf): Likewise.
+ * math/s_ctanhl.c (__ctanhl): Likewise.
+ * math/s_ctanl.c (__ctanl): Likewise.
+ * math/w_pow.c: Likewise.
+ * math/w_powf.c: Likewise.
+ * math/w_powl.c: Likewise.
+ * math/w_scalb.c (sysv_scalb): Likewise.
+ * math/w_scalbf.c (sysv_scalbf): Likewise.
+ * math/w_scalbl.c (sysv_scalbl): Likewise.
+ * misc/error.c (error_tail): Likewise.
+ * misc/pselect.c (__pselect): Likewise.
+ * nis/nis_callback.c (__nis_create_callback): Likewise.
+ * nis/nis_call.c (__nisfind_server): Likewise.
+ * nis/nis_creategroup.c (nis_creategroup): Likewise.
+ * nis/nis_domain_of_r.c (nis_domain_of_r): Likewise.
+ * nis/nis_findserv.c (__nis_findfastest_with_timeout): Likewise.
+ * nis/nis_getservlist.c (nis_getservlist): Likewise.
+ * nis/nis_lookup.c (nis_lookup): Likewise.
+ * nis/nis_subr.c (nis_leaf_of_r, nis_getnames): Likewise.
+ * nis/nis_table.c (__create_ib_request, nis_list, nis_add_entry,
+ nis_modify_entry, nis_first_entry, nis_next_entry): Likewise.
+ * nis/nis_xdr.c (xdr_endpoint): Likewise.
+ * nis/nss_compat/compat-grp.c (getgrent_next_file, internal_getgrnam_r,
+ internal_getgrgid_r): Likewise.
+ * nis/nss_compat/compat-initgroups.c (add_group, internal_getgrent_r):
+ Likewise.
+ * nis/nss_compat/compat-pwd.c (getpwent_next_file, internal_getpwnam_r,
+ internal_getpwuid_r): Likewise.
+ * nis/nss_compat/compat-spwd.c (getspent_next_file,
+ internal_getspnam_r): Likewise.
+ * nis/nss_nis/nis-alias.c (internal_nis_getaliasent_r,
+ _nss_nis_getaliasbyname_r): Likewise.
+ * nis/nss_nis/nis-ethers.c (_nss_nis_gethostton_r,
+ _nss_nis_getntohost_r): Likewise.
+ * nis/nss_nis/nis-grp.c (internal_nis_setgrent,
+ internal_nis_getgrent_r, _nss_nis_getgrnam_r, _nss_nis_getgrgid_r):
+ Likewise.
+ * nis/nss_nis/nis-hosts.c (_nss_nis_sethostent,
+ internal_nis_gethostent_r, internal_gethostbyname2_r,
+ _nss_nis_gethostbyname_r, _nss_nis_gethostbyaddr_r,
+ _nss_nis_gethostbyname4_r): Likewise.
+ * nis/nss_nis/nis-initgroups.c (internal_getgrent_r,
+ initgroups_netid): Likewise.
+ * nis/nss_nis/nis-netgrp.c (_nss_nis_setnetgrent): Likewise.
+ * nis/nss_nis/nis-network.c (internal_nis_getnetent_r,
+ _nss_nis_getnetbyname_r, _nss_nis_getnetbyaddr_r): Likewise.
+ * nis/nss_nis/nis-proto.c (_nss_nis_getprotobyname_r,
+ _nss_nis_getprotobynumber_r): Likewise.
+ * nis/nss_nis/nis-publickey.c (_nss_nis_getpublickey,
+ _nss_nis_getsecretkey): Likewise.
+ * nis/nss_nis/nis-pwd.c (_nis_saveit, internal_nis_setpwent,
+ internal_nis_getpwent_r, _nss_nis_getpwnam_r, _nss_nis_getpwuid_r):
+ Likewise.
+ * nis/nss_nis/nis-rpc.c (internal_nis_getrpcent_r,
+ _nss_nis_getrpcbyname_r, _nss_nis_getrpcbynumber_r): Likewise.
+ * nis/nss_nis/nis-service.c (dosearch, internal_nis_getservent_r,
+ _nss_nis_getservbyname_r, _nss_nis_getservbyport_r): Likewise.
+ * nis/nss_nis/nis-spwd.c (_nss_nis_setspent, internal_nis_getspent_r,
+ _nss_nis_getspnam_r): Likewise.
+ * nis/nss_nisplus/nisplus-alias.c (_nss_nisplus_getaliasbyname_r):
+ Likewise.
+ * nis/nss_nisplus/nisplus-ethers.c (_nss_nisplus_gethostton_r,
+ _nss_nisplus_getntohost_r): Likewise.
+ * nis/nss_nisplus/nisplus-grp.c (internal_nisplus_getgrent_r,
+ _nss_nisplus_getgrnam_r, _nss_nisplus_getgrgid_r): Likewise.
+ * nis/nss_nisplus/nisplus-hosts.c (internal_gethostbyname2_r,
+ _nss_nisplus_gethostbyaddr_r, _nss_nisplus_gethostbyname4_r): Likewise.
+ * nis/nss_nisplus/nisplus-initgroups.c (_nss_nisplus_initgroups_dyn):
+ Likewise.
+ * nis/nss_nisplus/nisplus-network.c (_nss_nisplus_getnetbyname_r,
+ _nss_nisplus_getnetbyaddr_r): Likewise.
+ * nis/nss_nisplus/nisplus-proto.c (_nss_nisplus_getprotobyname_r,
+ _nss_nisplus_getprotobynumber_r): Likewise.
+ * nis/nss_nisplus/nisplus-pwd.c (internal_nisplus_getpwent_r,
+ _nss_nisplus_getpwnam_r, _nss_nisplus_getpwuid_r): Likewise.
+ * nis/nss_nisplus/nisplus-rpc.c (_nss_nisplus_getrpcbyname_r):
+ Likewise.
+ * nis/nss_nisplus/nisplus-service.c (internal_nisplus_getservent_r,
+ _nss_nisplus_getservbyname_r, _nss_nisplus_getservbyport_r): Likewise.
+ * nis/nss_nisplus/nisplus-spwd.c (internal_nisplus_getspent_r,
+ _nss_nisplus_getspnam_r): Likewise.
+ * nis/ypclnt.c (__yp_bind, yp_match, yp_all, yp_maplist): Likewise.
+ * nscd/aicache.c (addhstaiX): Likewise.
+ * nscd/cache.c (cache_search, prune_cache): Likewise.
+ * nscd/connections.c (register_traced_file, send_ro_fd, handle_request,
+ nscd_run_prune, nscd_run_worker, fd_ready, main_loop_epoll): Likewise.
+ * nscd/grpcache.c (addgrbyX): Likewise.
+ * nscd/hstcache.c (addhstbyX): Likewise.
+ * nscd/initgrcache.c (addinitgroupsX): Likewise.
+ * nscd/mem.c (gc, mempool_alloc): Likewise.
+ * nscd/netgroupcache.c (do_notfound, addgetnetgrentX, addinnetgrX):
+ Likewise.
+ * nscd/nscd-client.h (__nscd_acquire_maplock, __nscd_drop_map_ref):
+ Likewise.
+ * nscd/nscd_getai.c (__nscd_getai): Likewise.
+ * nscd/nscd_getgr_r.c (nscd_getgr_r): Likewise.
+ * nscd/nscd_gethst_r.c (__nscd_get_nl_timestamp, nscd_gethst_r):
+ Likewise.
+ * nscd/nscd_getpw_r.c (nscd_getpw_r): Likewise.
+ * nscd/nscd_getserv_r.c (nscd_getserv_r): Likewise.
+ * nscd/nscd_helper.c (__readvall, open_socket,
+ __nscd_get_mapping, __nscd_get_map_ref): Likewise.
+ * nscd/nscd_initgroups.c (__nscd_getgrouplist): Likewise.
+ * nscd/nscd_netgroup.c (__nscd_setnetgrent, __nscd_innetgr): Likewise.
+ * nscd/pwdcache.c (addpwbyX): Likewise.
+ * nscd/selinux.c (preserve_capabilities): Likewise.
+ * nscd/servicescache.c (addservbyX): Likewise.
+ * nss/nss_files/files-XXX.c (internal_getent): Likewise.
+ * posix/fnmatch.c (fnmatch): Likewise.
+ * posix/getopt.c (_getopt_internal_r): Likewise.
+ * posix/glob.c (glob, glob_in_dir): Likewise.
+ * posix/wordexp.c (exec_comm_child): Likewise.
+ * resolv/nss_dns/dns-host.c (_nss_dns_gethostbyaddr2_r, getanswer_r,
+ gaih_getanswer_slice): Likewise.
+ * resolv/nss_dns/dns-network.c (getanswer_r): Likewise.
+ * resolv/res_init.c: Likewise.
+ * resolv/res_mkquery.c (res_nmkquery): Likewise.
+ * resolv/res_query.c (__libc_res_nquery): Likewise.
+ * resolv/res_send.c (__libc_res_nsend, send_vc, reopen, send_dg):
+ Likewise.
+ * stdio-common/_i18n_number.h (_i18n_number_rewrite): Likewise.
+ * stdio-common/perror.c (perror): Likewise.
+ * stdio-common/printf_fp.c (___printf_fp): Likewise.
+ * stdio-common/tmpnam.c (tmpnam): Likewise.
+ * stdio-common/vfscanf.c (_IO_vfscanf_internal): Likewise.
+ * stdlib/cxa_finalize.c (__cxa_finalize): Likewise.
+ * stdlib/cxa_thread_atexit_impl.c (__cxa_thread_atexit_impl): Likewise.
+ * stdlib/drand48-iter.c (__drand48_iterate): Likewise.
+ * stdlib/putenv.c (putenv): Likewise.
+ * stdlib/setenv.c (__add_to_environ): Likewise.
+ * stdlib/strtod_l.c (____STRTOF_INTERNAL): Likewise.
+ * stdlib/strtol_l.c (INTERNAL): Likewise.
+ * string/memmem.c (memmem): Likewise.
+ * string/strerror.c (strerror): Likewise.
+ * string/strnlen.c (__strnlen): Likewise.
+ * string/test-memmem.c (simple_memmem): Likewise.
+ * sunrpc/clnt_udp.c (__libc_clntudp_bufcreate): Likewise.
+ * sunrpc/pm_getport.c (__get_socket): Likewise.
+ * sysdeps/gnu/unwind-resume.c (init, _Unwind_Resume): Likewise.
+ * sysdeps/i386/dl-irel.h (elf_irel): Likewise.
+ * sysdeps/i386/dl-machine.h (elf_machine_runtime_setup,
+ elf_machine_rel, elf_machine_lazy_rel, elf_machine_lazy_rela):
+ Likewise.
+ * sysdeps/ieee754/dbl-64/e_atanh.c (__ieee754_atanh): Likewise.
+ * sysdeps/ieee754/dbl-64/e_exp2.c (__ieee754_exp2): Likewise.
+ * sysdeps/ieee754/dbl-64/e_fmod.c (__ieee754_fmod): Likewise.
+ * sysdeps/ieee754/dbl-64/e_gamma_r.c (__ieee754_gamma_r): Likewise.
+ * sysdeps/ieee754/dbl-64/e_hypot.c (__ieee754_hypot): Likewise.
+ * sysdeps/ieee754/dbl-64/e_j1.c (__ieee754_j1, __ieee754_y1): Likewise.
+ * sysdeps/ieee754/dbl-64/e_jn.c (__ieee754_jn, __ieee754_yn): Likewise.
+ * sysdeps/ieee754/dbl-64/e_log10.c (__ieee754_log10): Likewise.
+ * sysdeps/ieee754/dbl-64/e_log2.c (__ieee754_log2): Likewise.
+ * sysdeps/ieee754/dbl-64/e_log.c (__ieee754_log): Likewise.
+ * sysdeps/ieee754/dbl-64/e_sinh.c (__ieee754_sinh): Likewise.
+ * sysdeps/ieee754/dbl-64/s_asinh.c (__asinh): Likewise.
+ * sysdeps/ieee754/dbl-64/s_fma.c (__fma): Likewise.
+ * sysdeps/ieee754/dbl-64/s_log1p.c (__log1p): Likewise.
+ * sysdeps/ieee754/dbl-64/s_logb.c (__logb): Likewise.
+ * sysdeps/ieee754/dbl-64/s_modf.c (__modf): Likewise.
+ * sysdeps/ieee754/dbl-64/s_scalbln.c (__scalbln): Likewise.
+ * sysdeps/ieee754/dbl-64/s_scalbn.c (__scalbn): Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/e_acosh.c (__ieee754_acosh):
+ Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/e_log10.c (__ieee754_log10):
+ Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/e_log2.c (__ieee754_log2):
+ Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/s_frexp.c (__frexp): Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/s_logb.c (__logb): Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/s_remquo.c (__remquo): Likewise.
+ * sysdeps/ieee754/dbl-64/wordsize-64/s_round.c (__round): Likewise.
+ * sysdeps/ieee754/flt-32/e_atanhf.c (__ieee754_atanhf): Likewise.
+ * sysdeps/ieee754/flt-32/e_gammaf_r.c (__ieee754_gammaf_r): Likewise.
+ * sysdeps/ieee754/flt-32/s_logbf.c (__logbf): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/e_fmodl.c (__ieee754_fmodl): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/math_ldbl.h (ldbl_nearbyint): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/s_llrintl.c (__llrintl): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/s_llroundl.c (__llroundl): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/s_logbl.c (__logbl): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/s_lrintl.c (__lrintl): Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/s_lroundl.c (__lroundl): Likewise.
+ * sysdeps/ieee754/ldbl-128/s_fmal.c (__fmal): Likewise.
+ * sysdeps/ieee754/ldbl-96/e_gammal_r.c (__ieee754_gammal_r): Likewise.
+ * sysdeps/ieee754/ldbl-96/e_j0l.c (__ieee754_j0l, __ieee754_y0l):
+ Likewise.
+ * sysdeps/ieee754/ldbl-96/e_j1l.c (__ieee754_j1l, __ieee754_y1l):
+ Likewise.
+ * sysdeps/ieee754/ldbl-96/e_jnl.c (__ieee754_jnl, __ieee754_ynl):
+ Likewise.
+ * sysdeps/ieee754/ldbl-96/s_fma.c (__fma): Likewise.
+ * sysdeps/ieee754/ldbl-96/s_fmal.c (__fmal): Likewise.
+ * sysdeps/posix/clock_getres.c (hp_timing_getres, realtime_getres):
+ Likewise.
+ * sysdeps/posix/fdopendir.c (__fdopendir): Likewise.
+ * sysdeps/posix/getaddrinfo.c (gaih_inet, getaddrinfo): Likewise.
+ * sysdeps/posix/opendir.c (__opendirat): Likewise.
+ * sysdeps/posix/sleep.c: Likewise.
+ * sysdeps/posix/tempname.c: Likewise.
+ * sysdeps/powerpc/powerpc32/dl-irel.h (elf_irela): Likewise.
+ * sysdeps/powerpc/powerpc32/dl-machine.c (__process_machine_rela):
+ Likewise.
+ * sysdeps/powerpc/powerpc32/dl-machine.h (elf_machine_runtime_setup,
+ elf_machine_rela): Likewise.
+ * sysdeps/powerpc/powerpc64/dl-irel.h (elf_irela): Likewise.
+ * sysdeps/powerpc/powerpc64/dl-machine.h (elf_machine_rela): Likewise.
+ * sysdeps/pthread/aio_notify.c (__aio_notify_only): Likewise.
+ * sysdeps/pthread/aio_suspend.c (do_aio_misc_wait, aio_suspend):
+ Likewise.
+ * sysdeps/s390/dl-irel.h (elf_irela): Likewise.
+ * sysdeps/s390/s390-32/dl-machine.h (elf_machine_runtime_setup,
+ elf_machine_rela, elf_machine_lazy_rel): Likewise.
+ * sysdeps/s390/s390-64/dl-machine.h (elf_machine_runtime_setup,
+ elf_machine_rela, elf_machine_lazy_rel): Likewise.
+ * sysdeps/s390/s390-64/utf16-utf32-z9.c (gconv_end): Likewise.
+ * sysdeps/s390/s390-64/utf8-utf16-z9.c (gconv_end): Likewise.
+ * sysdeps/s390/s390-64/utf8-utf32-z9.c (gconv_end): Likewise.
+ * sysdeps/sh/dl-machine.h (elf_machine_rela): Likewise.
+ * sysdeps/sparc/sparc32/dl-irel.h (elf_irela): Likewise.
+ * sysdeps/sparc/sparc32/dl-machine.h (elf_machine_rela,
+ elf_machine_lazy_rel): Likewise.
+ * sysdeps/sparc/sparc64/dl-irel.h (elf_irela): Likewise.
+ * sysdeps/sparc/sparc64/dl-machine.h (elf_machine_rela,
+ elf_machine_lazy_rel): Likewise.
+ * sysdeps/sparc/sparc64/dl-plt.h (sparc64_fixup_plt): Likewise.
+ * sysdeps/unix/clock_gettime.c (hp_timing_gettime): Likewise.
+ * sysdeps/unix/clock_settime.c (hp_timing_settime): Likewise.
+ * sysdeps/unix/grantpt.c (grantpt): Likewise.
+ * sysdeps/unix/sysv/linux/accept4.c (accept4): Likewise.
+ * sysdeps/unix/sysv/linux/adjtime.c (ADJTIME): Likewise.
+ * sysdeps/unix/sysv/linux/check_pf.c (__check_pf): Likewise.
+ * sysdeps/unix/sysv/linux/dl-osinfo.h (_dl_setup_stack_chk_guard):
+ Likewise.
+ * sysdeps/unix/sysv/linux/faccessat.c (faccessat): Likewise.
+ * sysdeps/unix/sysv/linux/fchmodat.c (fchmodat): Likewise.
+ * sysdeps/unix/sysv/linux/fchownat.c (fchownat): Likewise.
+ * sysdeps/unix/sysv/linux/futimesat.c (futimesat): Likewise.
+ * sysdeps/unix/sysv/linux/fxstatat64.c (__fxstatat64): Likewise.
+ * sysdeps/unix/sysv/linux/fxstatat.c (__fxstatat): Likewise.
+ * sysdeps/unix/sysv/linux/i386/fallocate64.c (fallocate64): Likewise.
+ * sysdeps/unix/sysv/linux/i386/fallocate.c (fallocate): Likewise.
+ * sysdeps/unix/sysv/linux/i386/fchownat.c (fchownat): Likewise.
+ * sysdeps/unix/sysv/linux/i386/fxstatat.c (__fxstatat): Likewise.
+ * sysdeps/unix/sysv/linux/i386/get_clockfreq.c (__get_clockfreq):
+ Likewise.
+ * sysdeps/unix/sysv/linux/i386/posix_fallocate64.c
+ (__posix_fallocate64_l64): Likewise.
+ * sysdeps/unix/sysv/linux/i386/posix_fallocate.c
+ (posix_fallocate): Likewise.
+ * sysdeps/unix/sysv/linux/i386/scandir64.c (__old_scandir64): Likewise.
+ * sysdeps/unix/sysv/linux/i386/sysdep.h: Likewise.
+ * sysdeps/unix/sysv/linux/ifaddrs.c (__netlink_request,
+ getifaddrs_internal): Likewise.
+ * sysdeps/unix/sysv/linux/linkat.c (linkat): Likewise.
+ * sysdeps/unix/sysv/linux/mkdirat.c (mkdirat): Likewise.
+ * sysdeps/unix/sysv/linux/mq_unlink.c (mq_unlink): Likewise.
+ * sysdeps/unix/sysv/linux/openat.c (OPENAT_NOT_CANCEL): Likewise.
+ * sysdeps/unix/sysv/linux/posix_fallocate64.c
+ (__posix_fallocate64_l64): Likewise.
+ * sysdeps/unix/sysv/linux/posix_fallocate.c (posix_fallocate):
+ Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/fchownat.c (fchownat): Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/get_clockfreq.c (__get_clockfreq):
+ Likewise.
+ * sysdeps/unix/sysv/linux/readlinkat.c (readlinkat): Likewise.
+ * sysdeps/unix/sysv/linux/recvmmsg.c (recvmmsg): Likewise.
+ * sysdeps/unix/sysv/linux/renameat.c (renameat): Likewise.
+ * sysdeps/unix/sysv/linux/s390/s390-32/sysdep.h: Likewise.
+ * sysdeps/unix/sysv/linux/s390/s390-64/sysdep.h: Likewise.
+ * sysdeps/unix/sysv/linux/sched_setaffinity.c: Likewise.
+ * sysdeps/unix/sysv/linux/sendmmsg.c (__sendmmsg): Likewise.
+ * sysdeps/unix/sysv/linux/shm_open.c (where_is_shmfs, shm_open):
+ Likewise.
+ * sysdeps/unix/sysv/linux/sleep.c (__sleep): Likewise.
+ * sysdeps/unix/sysv/linux/symlinkat.c (symlinkat): Likewise.
+ * sysdeps/unix/sysv/linux/tcgetattr.c (__tcgetattr): Likewise.
+ * sysdeps/unix/sysv/linux/ttyname.c (ttyname): Likewise.
+ * sysdeps/unix/sysv/linux/ttyname_r.c (__ttyname_r): Likewise.
+ * sysdeps/unix/sysv/linux/unlinkat.c (unlinkat): Likewise.
+ * sysdeps/unix/sysv/linux/wordsize-64/fxstatat.c (__fxstatat):
+ Likewise.
+ * sysdeps/unix/sysv/linux/wordsize-64/posix_fallocate.c
+ (posix_fallocate): Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/sysdep.h: Likewise.
+ * sysdeps/unix/sysv/linux/xmknodat.c (__xmknodat): Likewise.
+ * sysdeps/x86_64/dl-irel.h (elf_irela): Likewise.
+ * sysdeps/x86_64/dl-machine.h (elf_machine_runtime_setup,
+ elf_machine_rela, elf_machine_rela_relative, elf_machine_lazy_rel):
+ Likewise.
+ * time/asctime.c (asctime_internal): Likewise.
+ * time/tzfile.c (__tzfile_read, __tzfile_compute): Likewise.
+ * time/tzset.c (__tzset_parse_tz): Likewise.
+ * wcsmbs/mbrtoc16.c (mbrtoc16): Likewise.
+ * wcsmbs/mbrtowc.c (__mbrtowc): Likewise.
+ * wcsmbs/wcsmbsload.c (__wcsmbs_load_conv): Likewise.
+ * wcsmbs/wcsmbsload.h: Likewise.
+
2014-02-10 Ondřej Bílka <neleai@seznam.cz>
[BZ #15894]
total = (total + 1 + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
struct abort_msg_s *buf = __mmap (NULL, total, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
- if (__builtin_expect (buf != MAP_FAILED, 1))
+ if (__glibc_likely (buf != MAP_FAILED))
{
buf->size = total;
strcpy (buf->msg, str);
/* Sanity check, otherwise the loop might search through the whole
memory. */
- if (__builtin_expect (haystack_len < needle_len, 0))
+ if (__glibc_unlikely (haystack_len < needle_len))
return NULL;
for (begin = (const char *) haystack; begin <= last_possible; ++begin)
{
const char *run_nlspath = nlspath;
#define ENOUGH(n) \
- if (__builtin_expect (bufact + (n) >= bufmax, 0)) \
+ if (__glibc_unlikely (bufact + (n) >= bufmax)) \
{ \
char *old_buf = buf; \
bufmax += 256 + (n); \
/* Determine whether the file is a catalog file and if yes whether
it is written using the correct byte order. Else we have to swap
the values. */
- if (__builtin_expect (catalog->file_ptr->magic == CATGETS_MAGIC, 1))
+ if (__glibc_likely (catalog->file_ptr->magic == CATGETS_MAGIC))
swapping = 0;
else if (catalog->file_ptr->magic == SWAPU32 (CATGETS_MAGIC))
swapping = 1;
#endif
/* Register the destructor of the dynamic linker if there is any. */
- if (__builtin_expect (rtld_fini != NULL, 1))
+ if (__glibc_likely (rtld_fini != NULL))
__cxa_atexit ((void (*) (void *)) rtld_fini, NULL, NULL);
#ifndef SHARED
#ifdef SHARED
/* Auditing checkpoint: we have a new object. */
- if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
+ if (__glibc_unlikely (GLRO(dl_naudit) > 0))
{
struct audit_ifaces *afct = GLRO(dl_audit);
struct link_map *head = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
#endif
#ifdef SHARED
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS))
GLRO(dl_debug_printf) ("\ntransferring control: %s\n\n", argv[0]);
#endif
int not_first_call;
not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
- if (__builtin_expect (! not_first_call, 1))
+ if (__glibc_likely (! not_first_call))
{
struct pthread *self = THREAD_SELF;
size_t
__confstr_chk (int name, char *buf, size_t len, size_t buflen)
{
- if (__builtin_expect (buflen < len, 0))
+ if (__glibc_unlikely (buflen < len))
__chk_fail ();
return confstr (name, buf, len);
__chk_fail ();
}
- if (__builtin_expect (bytes_requested > ptrlen, 0))
+ if (__glibc_unlikely (bytes_requested > ptrlen))
__chk_fail ();
CHECK_FILE (stream, 0);
__chk_fail ();
}
- if (__builtin_expect (bytes_requested > ptrlen, 0))
+ if (__glibc_unlikely (bytes_requested > ptrlen))
__chk_fail ();
CHECK_FILE (stream, 0);
int
__getgroups_chk (int size, __gid_t list[], size_t listlen)
{
- if (__builtin_expect (size < 0, 0))
+ if (__glibc_unlikely (size < 0))
{
__set_errno (EINVAL);
return -1;
}
- if (__builtin_expect (size * sizeof (__gid_t) > listlen, 0))
+ if (__glibc_unlikely (size * sizeof (__gid_t) > listlen))
__chk_fail ();
return __getgroups (size, list);
__mbsnrtowcs_chk (wchar_t *dst, const char **src, size_t nmc, size_t len,
mbstate_t *ps, size_t dstlen)
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return __mbsnrtowcs (dst, src, nmc, len, ps);
__mbsrtowcs_chk (wchar_t *dst, const char **src, size_t len,
mbstate_t *ps, size_t dstlen)
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return __mbsrtowcs (dst, src, len, ps);
size_t
__mbstowcs_chk (wchar_t *dst, const char *src, size_t len, size_t dstlen)
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
mbstate_t state;
size_t len;
size_t dstlen;
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return memcpy (dstpp, srcpp, len);
size_t len;
size_t destlen;
{
- if (__builtin_expect (destlen < len, 0))
+ if (__glibc_unlikely (destlen < len))
__chk_fail ();
return memmove (dest, src, len);
size_t len;
size_t dstlen;
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return __mempcpy (dstpp, srcpp, len);
size_t len;
size_t dstlen;
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return memset (dstpp, c, len);
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
*d++ = *s;
}
/* Find the end of the string. */
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *s1++;
}
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *s;
*(s++ + off) = c;
/* Find the end of S1. */
do
{
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
c = *s1++;
}
size_t n4 = n >> 2;
do
{
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
if (c == '\0')
return s;
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
if (c == '\0')
return s;
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
if (c == '\0')
return s;
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
while (n > 0)
{
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
if (c != '\0')
{
- if (__builtin_expect (s1len-- == 0, 0))
+ if (__glibc_unlikely (s1len-- == 0))
__chk_fail ();
*++s1 = '\0';
}
Though, maxlen is supposed to be the size of buffer pointed
to by s, so a conforming program can't pass such maxlen
to *snprintf. */
- if (__builtin_expect (slen < maxlen, 0))
+ if (__glibc_unlikely (slen < maxlen))
__chk_fail ();
_IO_strnfile sf;
Though, maxlen is supposed to be the size of buffer pointed
to by s, so a conforming program can't pass such maxlen
to *snprintf. */
- if (__builtin_expect (slen < maxlen, 0))
+ if (__glibc_unlikely (slen < maxlen))
__chk_fail ();
_IO_wstrnfile sf;
/* We need to handle the special case where MAXLEN is 0. Use the
overflow buffer right from the start. */
- if (__builtin_expect (maxlen == 0, 0))
+ if (__glibc_unlikely (maxlen == 0))
/* Since we have to write at least the terminating L'\0' a buffer
length of zero always makes the function fail. */
return -1;
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = wcp[off];
*++wcp = c;
wchar_t *
__wcpncpy_chk (wchar_t *dest, const wchar_t *src, size_t n, size_t destlen)
{
- if (__builtin_expect (destlen < n, 0))
+ if (__glibc_unlikely (destlen < n))
__chk_fail ();
/* This function is not often enough used to justify not using a
/* Find the end of the string. */
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *s1++;
}
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *s2++;
*++s1 = c;
do
{
- if (__builtin_expect (n-- == 0, 0))
+ if (__glibc_unlikely (n-- == 0))
__chk_fail ();
c = *wcp++;
wcp[off] = c;
do
{
- if (__builtin_expect (n-- == 0, 0))
+ if (__glibc_unlikely (n-- == 0))
__chk_fail ();
c = *src++;
*wcp++ = c;
/* Find the end of DEST. */
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *dest++;
}
size_t n4 = n >> 2;
do
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *src++;
*++dest = c;
if (c == L'\0')
return s;
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *src++;
*++dest = c;
if (c == L'\0')
return s;
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *src++;
*++dest = c;
if (c == L'\0')
return s;
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *src++;
*++dest = c;
while (n > 0)
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
c = *src++;
*++dest = c;
if (c != L'\0')
{
- if (__builtin_expect (destlen-- == 0, 0))
+ if (__glibc_unlikely (destlen-- == 0))
__chk_fail ();
*++dest = L'\0';
}
wchar_t *
__wcsncpy_chk (wchar_t *dest, const wchar_t *src, size_t n, size_t destlen)
{
- if (__builtin_expect (destlen < n, 0))
+ if (__glibc_unlikely (destlen < n))
__chk_fail ();
/* This function is not often enough used to justify not using a
__wcsnrtombs_chk (char *dst, const wchar_t **src, size_t nwc, size_t len,
mbstate_t *ps, size_t dstlen)
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return __wcsnrtombs (dst, src, nwc, len, ps);
__wcsrtombs_chk (char *dst, const wchar_t **src, size_t len,
mbstate_t *ps, size_t dstlen)
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
return __wcsrtombs (dst, src, len, ps);
size_t
__wcstombs_chk (char *dst, const wchar_t *src, size_t len, size_t dstlen)
{
- if (__builtin_expect (dstlen < len, 0))
+ if (__glibc_unlikely (dstlen < len))
__chk_fail ();
mbstate_t state;
wchar_t *
__wmemcpy_chk (wchar_t *s1, const wchar_t *s2, size_t n, size_t ns1)
{
- if (__builtin_expect (ns1 < n, 0))
+ if (__glibc_unlikely (ns1 < n))
__chk_fail ();
return (wchar_t *) memcpy ((char *) s1, (char *) s2, n * sizeof (wchar_t));
}
wchar_t *
__wmemmove_chk (wchar_t *s1, const wchar_t *s2, size_t n, size_t ns1)
{
- if (__builtin_expect (ns1 < n, 0))
+ if (__glibc_unlikely (ns1 < n))
__chk_fail ();
return (wchar_t *) memmove ((char *) s1, (char *) s2, n * sizeof (wchar_t));
}
wchar_t *
__wmempcpy_chk (wchar_t *s1, const wchar_t *s2, size_t n, size_t ns1)
{
- if (__builtin_expect (ns1 < n, 0))
+ if (__glibc_unlikely (ns1 < n))
__chk_fail ();
return (wchar_t *) __mempcpy ((char *) s1, (char *) s2,
n * sizeof (wchar_t));
wchar_t *
__wmemset_chk (wchar_t *s, wchar_t c, size_t n, size_t dstlen)
{
- if (__builtin_expect (dstlen < n, 0))
+ if (__glibc_unlikely (dstlen < n))
__chk_fail ();
return wmemset (s, c, n);
/* Ignore errors from select or readdir */
__set_errno (0);
- if (__builtin_expect (c.cnt == vsize, 0))
+ if (__glibc_unlikely (c.cnt == vsize))
{
DIRENT_TYPE **new;
if (vsize == 0)
__dladdr (const void *address, Dl_info *info)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dladdr (address, info);
# endif
return _dl_addr (address, info, NULL, NULL);
__dladdr1 (const void *address, Dl_info *info, void **extra, int flags)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dladdr1 (address, info, extra, flags);
# endif
__dlclose (void *handle)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlclose (handle);
# endif
struct dl_action_result *result;
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlerror ();
# endif
__dlinfo (void *handle, int request, void *arg DL_CALLER_DECL)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlinfo (handle, request, arg,
DL_CALLER);
# endif
/* It makes no sense to use RTLD_GLOBAL when loading a DSO into
a namespace other than the base namespace. */
- if (__builtin_expect (args->mode & RTLD_GLOBAL, 0))
+ if (__glibc_unlikely (args->mode & RTLD_GLOBAL))
GLRO(dl_signal_error) (EINVAL, NULL, NULL, N_("invalid mode"));
}
__dlmopen (Lmid_t nsid, const char *file, int mode DL_CALLER_DECL)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlmopen (nsid, file, mode, RETURN_ADDRESS (0));
# endif
__dlopen (const char *file, int mode DL_CALLER_DECL)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlopen (file, mode, DL_CALLER);
# endif
mode |= RTLD_LAZY;
args.mode = mode;
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlopen (file, mode, RETURN_ADDRESS (0));
return _dlerror_run (dlopen_doit, &args) ? NULL : args.new;
__dlsym (void *handle, const char *name DL_CALLER_DECL)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlsym (handle, name, DL_CALLER);
# endif
DL_CALLER_DECL)
{
# ifdef SHARED
- if (__builtin_expect (_dlfcn_hook != NULL, 0))
+ if (__glibc_unlikely (_dlfcn_hook != NULL))
return _dlfcn_hook->dlvsym (handle, name, version_str, DL_CALLER);
# endif
\
/* Actually compare the entry with the key. */ \
cmpres = _dl_cache_libcmp (name, cache_data + key); \
- if (__builtin_expect (cmpres == 0, 0)) \
+ if (__glibc_unlikely (cmpres == 0)) \
{ \
/* Found it. LEFT now marks the last entry for which we \
know the name is correct. */ \
const char *best;
/* Print a message if the loading of libs is traced. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
_dl_debug_printf (" search cache=%s\n", LD_SO_CACHE);
if (cache == NULL)
/* The entry might still be in its unused state if we are closing an
object that wasn't fully set up. */
- if (__builtin_expect (old_map != NULL, 1))
+ if (__glibc_likely (old_map != NULL))
{
assert (old_map->l_tls_modid == idx);
dl_close_state = rerun;
/* There are still references to this object. Do nothing more. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
map->l_name, map->l_direct_opencount);
#ifdef SHARED
/* Auditing checkpoint: we remove an object. */
- if (__builtin_expect (do_audit, 0))
+ if (__glibc_unlikely (do_audit))
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
#ifdef SHARED
/* Auditing checkpoint: we will start deleting objects. */
- if (__builtin_expect (do_audit, 0))
+ if (__glibc_unlikely (do_audit))
{
struct link_map *head = ns->_ns_loaded;
struct audit_ifaces *afct = GLRO(dl_audit);
object. We can unmap it. */
/* Remove the object from the dtv slotinfo array if it uses TLS. */
- if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
+ if (__glibc_unlikely (imap->l_tls_blocksize > 0))
{
any_tls = true;
free (imap->l_reldeps);
/* Print debugging message. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
imap->l_name, imap->l_ns);
/* If we removed any object which uses TLS bump the generation counter. */
if (any_tls)
{
- if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
+ if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
_dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO".\n");
if (tls_free_end == GL(dl_tls_static_used))
#ifdef SHARED
/* Auditing checkpoint: we have deleted all objects. */
- if (__builtin_expect (do_audit, 0))
+ if (__glibc_unlikely (do_audit))
{
struct link_map *head = ns->_ns_loaded;
/* Do not call the functions for any auditing object. */
struct link_map *map = _map;
/* First see whether we can remove the object at all. */
- if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
+ if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
{
assert (map->l_init_called);
/* Nope. Do nothing. */
ElfW(Rela) *conflictend)
{
#if ! ELF_MACHINE_NO_RELA
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_RELOC, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_RELOC))
_dl_debug_printf ("\nconflict processing: %s\n", DSO_FILENAME (l->l_name));
{
else \
{ \
/* This is for DT_AUXILIARY. */ \
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))\
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS)) \
_dl_debug_printf (N_("\
cannot load auxiliary `%s' because of empty dynamic string token " \
"substitution\n"), __str); \
bool malloced;
int err = _dl_catch_error (&objname, &errstring, &malloced,
openaux, &args);
- if (__builtin_expect (errstring != NULL, 0))
+ if (__glibc_unlikely (errstring != NULL))
{
char *new_errstring = strdupa (errstring);
objname = strdupa (objname);
bool malloced;
(void) _dl_catch_error (&objname, &errstring, &malloced,
openaux, &args);
- if (__builtin_expect (errstring != NULL, 0))
+ if (__glibc_unlikely (errstring != NULL))
{
/* We are not interested in the error message. */
assert (errstring != NULL);
bool malloced;
int err = _dl_catch_error (&objname, &errstring, &malloced,
openaux, &args);
- if (__builtin_expect (errstring != NULL, 0))
+ if (__glibc_unlikely (errstring != NULL))
{
char *new_errstring = strdupa (errstring);
objname = strdupa (objname);
itself will always be initialize last. */
memcpy (l_initfini, map->l_searchlist.r_list,
nlist * sizeof (struct link_map *));
- if (__builtin_expect (nlist > 1, 1))
+ if (__glibc_likely (nlist > 1))
{
/* We can skip looking for the binary itself which is at the front
of the search list. */
if (runp != NULL)
/* Look through the dependencies of the object. */
while (*runp != NULL)
- if (__builtin_expect (*runp++ == thisp, 0))
+ if (__glibc_unlikely (*runp++ == thisp))
{
/* Move the current object to the back past the last
object with it as the dependency. */
size_t __cnt = 0; \
const char *__sf = strchr (name, '$'); \
\
- if (__builtin_expect (__sf != NULL, 0)) \
+ if (__glibc_unlikely (__sf != NULL)) \
__cnt = _dl_dst_count (__sf, is_path); \
\
__cnt; })
if (runp != NULL)
/* Look through the dependencies of the object. */
while (*runp != NULL)
- if (__builtin_expect (*runp++ == thisp, 0))
+ if (__glibc_unlikely (*runp++ == thisp))
{
move:
/* Move the current object to the back past the last
goto next;
}
- if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
+ if (__glibc_unlikely (maps[k]->l_reldeps != NULL))
{
unsigned int m = maps[k]->l_reldeps->act;
struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
/* Look through the relocation dependencies of the object. */
while (m-- > 0)
- if (__builtin_expect (relmaps[m] == thisp, 0))
+ if (__glibc_unlikely (relmaps[m] == thisp))
{
/* If a cycle exists with a link time dependency,
preserve the latter. */
struct link_map **runp = thisp->l_initfini;
if (runp != NULL)
while (*runp != NULL)
- if (__builtin_expect (*runp++ == maps[k], 0))
+ if (__glibc_unlikely (*runp++ == maps[k]))
goto ignore;
goto move;
}
goto again;
}
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
_dl_debug_printf ("\nruntime linker statistics:\n"
" final number of relocations: %lu\n"
"final number of relocations from cache: %lu\n",
Elf_Symndx symidx;
struct local *l;
- if (__builtin_expect (ftab == NULL, 0))
+ if (__glibc_unlikely (ftab == NULL))
ftab = make_fptr_table (map);
symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
len = strlen (p);
/* Skip entries that are not enabled in the mask word. */
- if (__builtin_expect (mask & ((ElfW(Word)) 1 << bit), 1))
+ if (__glibc_likely (mask & ((ElfW(Word)) 1 << bit)))
{
temp[m].str = p;
temp[m].len = len;
return;
/* Print a debug message if wanted. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS))
_dl_debug_printf ("\ncalling init: %s\n\n",
DSO_FILENAME (l->l_name));
ElfW(Dyn) *preinit_array_size = main_map->l_info[DT_PREINIT_ARRAYSZ];
unsigned int i;
- if (__builtin_expect (GL(dl_initfirst) != NULL, 0))
+ if (__glibc_unlikely (GL(dl_initfirst) != NULL))
{
call_init (GL(dl_initfirst), argc, argv, env);
GL(dl_initfirst) = NULL;
ElfW(Addr) *addrs;
unsigned int cnt;
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS))
_dl_debug_printf ("\ncalling preinit: %s\n\n",
DSO_FILENAME (main_map->l_name));
args.caller_dlopen = RETURN_ADDRESS (0);
#ifdef SHARED
- if (__builtin_expect (_dl_open_hook != NULL, 0))
+ if (__glibc_unlikely (_dl_open_hook != NULL))
return _dl_open_hook->dlopen_mode (name, mode);
return (dlerror_run (do_dlopen, &args) ? NULL : (void *) args.map);
#else
args.name = name;
#ifdef SHARED
- if (__builtin_expect (_dl_open_hook != NULL, 0))
+ if (__glibc_unlikely (_dl_open_hook != NULL))
return _dl_open_hook->dlsym (map, name);
#endif
return (dlerror_run (do_dlsym, &args) ? NULL
__libc_dlclose (void *map)
{
#ifdef SHARED
- if (__builtin_expect (_dl_open_hook != NULL, 0))
+ if (__glibc_unlikely (_dl_open_hook != NULL))
return _dl_open_hook->dlclose (map);
#endif
return dlerror_run (do_dlclose, map);
do
{
- if (__builtin_expect (*name == '$', 0))
+ if (__glibc_unlikely (*name == '$'))
{
const char *repl = NULL;
size_t len;
dirp->status[cnt] = init_val;
dirp->what = what;
- if (__builtin_expect (where != NULL, 1))
+ if (__glibc_likely (where != NULL))
dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
+ (ncapstr * sizeof (enum r_dir_status)),
where, where_len);
#ifdef SHARED
/* Expand DSTs. */
size_t cnt = DL_DST_COUNT (llp, 1);
- if (__builtin_expect (cnt == 0, 1))
+ if (__glibc_likely (cnt == 0))
llp_tmp = strdupa (llp);
else
{
bool make_consistent = false;
/* Get file information. */
- if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
+ if (__glibc_unlikely (__fxstat64 (_STAT_VER, fd, &st) < 0))
{
errstring = N_("cannot stat shared object");
call_lose_errno:
}
/* Print debugging message. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
/* This is the ELF header. We read it in `open_verify'. */
/* Enter the new object in the list of loaded objects. */
l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
- if (__builtin_expect (l == NULL, 0))
+ if (__glibc_unlikely (l == NULL))
{
#ifdef SHARED
fail_new:
/* Now we install the TCB in the thread register. */
errstring = TLS_INIT_TP (tcb, 0);
- if (__builtin_expect (errstring == NULL, 1))
+ if (__glibc_likely (errstring == NULL))
{
/* Now we are all good. */
l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
break;
}
- if (__builtin_expect (nloadcmds == 0, 0))
+ if (__glibc_unlikely (nloadcmds == 0))
{
/* This only happens for a bogus object that will be caught with
another error below. But we don't want to go through the
c->prot,
MAP_COPY|MAP_FILE,
fd, c->mapoff);
- if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
+ if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
{
map_error:
errstring = N_("failed to map segment from shared object");
/* This object is loaded at a fixed address. This must never
happen for objects loaded with dlopen(). */
- if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
+ if (__glibc_unlikely ((mode & __RTLD_OPENEXEC) == 0))
{
errstring = N_("cannot dynamically load executable");
goto call_lose;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment. */
- if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
+ if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
{
/* Dag nab it. */
if (__mprotect ((caddr_t) (zero
}
}
memset ((void *) zero, '\0', zeropage - zero);
- if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
+ if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
__mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot);
}
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
-1, 0);
- if (__builtin_expect (mapat == MAP_FAILED, 0))
+ if (__glibc_unlikely (mapat == MAP_FAILED))
{
errstring = N_("cannot map zero-fill pages");
goto call_lose_errno;
if (l->l_ld == 0)
{
- if (__builtin_expect (type == ET_DYN, 0))
+ if (__glibc_unlikely (type == ET_DYN))
{
errstring = N_("object file has no dynamic section");
goto call_lose;
/* Adjust the PT_PHDR value by the runtime load address. */
l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
- if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
+ if (__glibc_unlikely ((stack_flags &~ GL(dl_stack_flags)) & PF_X))
{
if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
0) != 0)
const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
+ m->l_relro_size)
& -GLRO(dl_pagesize));
- if (__builtin_expect (p + s <= relro_end, 1))
+ if (__glibc_likely (p + s <= relro_end))
{
/* The variable lies in the region protected by RELRO. */
if (__mprotect ((void *) p, s, PROT_READ|PROT_WRITE) < 0)
l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
/* We are done mapping in the file. We no longer need the descriptor. */
- if (__builtin_expect (__close (fd) != 0, 0))
+ if (__glibc_unlikely (__close (fd) != 0))
{
errstring = N_("cannot close file descriptor");
goto call_lose_errno;
l->l_entry += l->l_addr;
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\
dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
ehdr = (ElfW(Ehdr) *) fbp->buf;
/* Now run the tests. */
- if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
+ if (__glibc_unlikely (fbp->len < (ssize_t) sizeof (ElfW(Ehdr))))
{
errval = errno;
errstring = (errval == 0
const char *current_what = NULL;
int any = 0;
- if (__builtin_expect (dirs == NULL, 0))
+ if (__glibc_unlikely (dirs == NULL))
/* We're called before _dl_init_paths when loading the main executable
given on the command line when rtld is run directly. */
return -1;
- buf);
/* Print name we try if this is wanted. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
_dl_debug_printf (" trying file=%s\n", buf);
fd = open_verify (buf, fbp, loader, whatcode, found_other_class,
while (*++dirs != NULL);
/* Remove the whole path if none of the directories exists. */
- if (__builtin_expect (! any, 0))
+ if (__glibc_unlikely (! any))
{
/* Paths which were allocated using the minimal malloc() in ld.so
must not be freed using the general free() in libc. */
size_t namelen = strlen (name) + 1;
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
_dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);
fd = -1;
fd = open_verify (cached,
&fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
LA_SER_CONFIG, &found_other_class, false);
- if (__builtin_expect (fd != -1, 1))
+ if (__glibc_likely (fd != -1))
{
realname = local_strdup (cached);
if (realname == NULL)
&realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
/* Add another newline when we are tracing the library loading. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
_dl_debug_printf ("\n");
}
else
continue;
/* Print some debugging info if wanted. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SYMBOLS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SYMBOLS))
_dl_debug_printf ("symbol=%s; lookup in file=%s [%lu]\n",
undef_name, DSO_FILENAME (map->l_name),
map->l_ns);
#define ALLOWED_STT \
((1 << STT_NOTYPE) | (1 << STT_OBJECT) | (1 << STT_FUNC) \
| (1 << STT_COMMON) | (1 << STT_TLS) | (1 << STT_GNU_IFUNC))
- if (__builtin_expect (((1 << stt) & ALLOWED_STT) == 0, 0))
+ if (__glibc_unlikely (((1 << stt) & ALLOWED_STT) == 0))
return NULL;
if (sym != ref && strcmp (strtab + sym->st_name, undef_name))
const ElfW(Half) *verstab = map->l_versyms;
if (version != NULL)
{
- if (__builtin_expect (verstab == NULL, 0))
+ if (__glibc_unlikely (verstab == NULL))
{
/* We need a versioned symbol but haven't found any. If
this is the object which is referenced in the verneed
const ElfW(Sym) *sym;
const ElfW(Addr) *bitmask = map->l_gnu_bitmask;
- if (__builtin_expect (bitmask != NULL, 1))
+ if (__glibc_likely (bitmask != NULL))
{
ElfW(Addr) bitmask_word
= bitmask[(new_hash / __ELF_NATIVE_CLASS)
{
case STB_WEAK:
/* Weak definition. Use this value if we don't find another. */
- if (__builtin_expect (GLRO(dl_dynamic_weak), 0))
+ if (__glibc_unlikely (GLRO(dl_dynamic_weak)))
{
if (! result->s)
{
LD_TRACE_PRELINKING in _dl_debug_bindings. Don't
allocate anything and don't enter anything into the
hash table. */
- if (__builtin_expect (tab->size, 0))
+ if (__glibc_unlikely (tab->size))
{
assert (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK);
__rtld_lock_unlock_recursive (tab->lock);
unsigned long long serial = map->l_serial;
/* Make sure nobody can unload the object while we are at it. */
- if (__builtin_expect (flags & DL_LOOKUP_GSCOPE_LOCK, 0))
+ if (__glibc_unlikely (flags & DL_LOOKUP_GSCOPE_LOCK))
{
/* We can't just call __rtld_lock_lock_recursive (GL(dl_load_lock))
here, that can result in ABBA deadlock. */
}
/* Add the reference now. */
- if (__builtin_expect (l_reldepsact >= undef_map->l_reldepsmax, 0))
+ if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
{
/* Allocate more memory for the dependency list. Since this
can never happen during the startup phase we can use
}
/* Display information if we are debugging. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\
\nfile=%s [%lu]; needed by %s [%lu] (relocation dependency)\n\n",
DSO_FILENAME (map->l_name),
/* Release the lock. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
- if (__builtin_expect (flags & DL_LOOKUP_GSCOPE_LOCK, 0))
+ if (__glibc_unlikely (flags & DL_LOOKUP_GSCOPE_LOCK))
THREAD_GSCOPE_SET_FLAG ();
return result;
== 0);
size_t i = 0;
- if (__builtin_expect (skip_map != NULL, 0))
+ if (__glibc_unlikely (skip_map != NULL))
/* Search the relevant loaded objects for a definition. */
while ((*scope)->r_list[i] != skip_map)
++i;
}
}
- if (__builtin_expect (current_value.s == NULL, 0))
+ if (__glibc_unlikely (current_value.s == NULL))
{
if ((*ref == NULL || ELFW(ST_BIND) ((*ref)->st_info) != STB_WEAK)
&& skip_map == NULL
int protected = (*ref
&& ELFW(ST_VISIBILITY) ((*ref)->st_other) == STV_PROTECTED);
- if (__builtin_expect (protected != 0, 0))
+ if (__glibc_unlikely (protected != 0))
{
/* It is very tricky. We need to figure out what value to
return for the protected symbol. */
version, type_class, flags, skip_map);
/* The object is used. */
- if (__builtin_expect (current_value.m->l_used == 0, 0))
+ if (__glibc_unlikely (current_value.m->l_used == 0))
current_value.m->l_used = 1;
if (__builtin_expect (GLRO(dl_debug_mask)
/* Insufficient space left; allocate another page. */
caddr_t page;
size_t nup = (n + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
- if (__builtin_expect (nup == 0, 0))
+ if (__glibc_unlikely (nup == 0))
{
if (n)
return NULL;
ns->_ns_main_searchlist->r_list[new_nlist++] = map;
/* We modify the global scope. Report this. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
_dl_debug_printf ("\nadd %s [%lu] to global scope\n",
map->l_name, map->l_ns);
}
return;
}
- if (__builtin_expect (mode & __RTLD_SPROF, 0))
+ if (__glibc_unlikely (mode & __RTLD_SPROF))
/* This happens only if we load a DSO for 'sprof'. */
return;
++new->l_direct_opencount;
/* It was already open. */
- if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
+ if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
{
/* Let the user know about the opencount. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
new->l_name, new->l_ns, new->l_direct_opencount);
#ifdef SHARED
/* Auditing checkpoint: we have added all objects. */
- if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
+ if (__glibc_unlikely (GLRO(dl_naudit) > 0))
{
struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
/* Do not call the functions for any auditing object. */
LIBC_PROBE (map_complete, 3, args->nsid, r, new);
/* Print scope information. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
_dl_show_scope (new, 0);
/* Only do lazy relocation if `LD_BIND_NOW' is not set. */
if (runp != NULL)
/* Look through the dependencies of the object. */
while (*runp != NULL)
- if (__builtin_expect (*runp++ == thisp, 0))
+ if (__glibc_unlikely (*runp++ == thisp))
{
/* Move the current object to the back past the last
object with it as the dependency. */
}
#ifdef SHARED
- if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
+ if (__glibc_unlikely (GLRO(dl_profile) != NULL))
{
/* If this here is the shared object which we want to profile
make sure the profile is started. We can find out whether
/* Avoid duplicates. */
continue;
- if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
+ if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
{
/* The 'r_scope' array is too small. Allocate a new one
dynamically. */
}
/* Print scope information. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
_dl_show_scope (imap, from_scope);
}
/* Mark the object as not deletable if the RTLD_NODELETE flags was
passed. */
- if (__builtin_expect (mode & RTLD_NODELETE, 0))
+ if (__glibc_unlikely (mode & RTLD_NODELETE))
new->l_flags_1 |= DF_1_NODELETE;
#ifndef SHARED
#endif
/* Let the user know about the opencount. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
new->l_name, new->l_ns, new->l_direct_opencount);
}
/* Make sure we are alone. */
__rtld_lock_lock_recursive (GL(dl_load_lock));
- if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
+ if (__glibc_unlikely (nsid == LM_ID_NEWLM))
{
/* Find a new namespace. */
for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
if (GL(dl_ns)[nsid]._ns_loaded == NULL)
break;
- if (__builtin_expect (nsid == DL_NNS, 0))
+ if (__glibc_unlikely (nsid == DL_NNS))
{
/* No more namespace available. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
#endif
/* See if an error occurred during loading. */
- if (__builtin_expect (errstring != NULL, 0))
+ if (__glibc_unlikely (errstring != NULL))
{
/* Remove the object from memory. It may be in an inconsistent
state if relocation failed, for example. */
&& __builtin_expect (l->l_info[DT_BIND_NOW] != NULL, 0))
lazy = 0;
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_RELOC, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_RELOC))
_dl_debug_printf ("\nrelocation processing: %s%s\n",
DSO_FILENAME (l->l_name), lazy ? " (lazy)" : "");
/* DT_TEXTREL is now in level 2 and might phase out at some time.
But we rewrite the DT_FLAGS entry to a DT_TEXTREL entry to make
testing easier and therefore it will be available at all time. */
- if (__builtin_expect (l->l_info[DT_TEXTREL] != NULL, 0))
+ if (__glibc_unlikely (l->l_info[DT_TEXTREL] != NULL))
{
/* Bletch. We must make read-only segments writable
long enough to relocate them. */
ELF_DYNAMIC_RELOCATE (l, lazy, consider_profiling, skip_ifunc);
#ifndef PROF
- if (__builtin_expect (consider_profiling, 0))
+ if (__glibc_unlikely (consider_profiling))
{
/* Allocate the array which will contain the already found
relocations. If the shared object lacks a PLT (for example
value = elf_ifunc_invoke (DL_FIXUP_VALUE_ADDR (value));
/* Finally, fix up the plt itself. */
- if (__builtin_expect (GLRO(dl_bind_not), 0))
+ if (__glibc_unlikely (GLRO(dl_bind_not)))
return value;
return elf_machine_fixup_plt (l, result, reloc, rel_addr, value);
#endif
/* Store the result for later runs. */
- if (__builtin_expect (! GLRO(dl_bind_not), 1))
+ if (__glibc_likely (! GLRO(dl_bind_not)))
*resultp = value;
}
THREAD_GSCOPE_RESET_FLAG ();
- if (__builtin_expect (errstring != NULL, 0))
+ if (__glibc_unlikely (errstring != NULL))
{
/* The lookup was unsuccessful. Rethrow the error. */
char *errstring_dup = strdupa (errstring);
}
else if (handle == RTLD_NEXT)
{
- if (__builtin_expect (match == GL(dl_ns)[LM_ID_BASE]._ns_loaded, 0))
+ if (__glibc_unlikely (match == GL(dl_ns)[LM_ID_BASE]._ns_loaded))
{
if (match == NULL
|| caller < match->l_map_start
value = DL_SYMBOL_ADDRESS (result, ref);
/* Resolve indirect function address. */
- if (__builtin_expect (ELFW(ST_TYPE) (ref->st_info) == STT_GNU_IFUNC, 0))
+ if (__glibc_unlikely (ELFW(ST_TYPE) (ref->st_info) == STT_GNU_IFUNC))
{
DL_FIXUP_VALUE_TYPE fixup
= DL_FIXUP_MAKE_VALUE (result, (ElfW(Addr)) value);
/* Auditing checkpoint: we have a new binding. Provide the
auditing libraries the possibility to change the value and
tell us whether further auditing is wanted. */
- if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
+ if (__glibc_unlikely (GLRO(dl_naudit) > 0))
{
const char *strtab = (const char *) D_PTR (result,
l_info[DT_STRTAB]);
!= FORCED_DYNAMIC_TLS_OFFSET, 0))
{
__rtld_lock_lock_recursive (GL(dl_load_lock));
- if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
+ if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
{
the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
__rtld_lock_unlock_recursive (GL(dl_load_lock));
!= FORCED_DYNAMIC_TLS_OFFSET, 1))
{
void *p = dtv[GET_ADDR_MODULE].pointer.val;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
goto again;
return (char *) p + GET_ADDR_OFFSET;
void *p = dtv[GET_ADDR_MODULE].pointer.val;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
return tls_get_addr_tail (GET_ADDR_PARAM, dtv, the_map);
return (void *) p + GET_ADDR_OFFSET;
{
dtv_t *dtv = THREAD_DTV ();
- if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
+ if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
return update_get_addr (GET_ADDR_PARAM);
void *p = dtv[GET_ADDR_MODULE].pointer.val;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);
return (char *) p + GET_ADDR_OFFSET;
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
- if (__builtin_expect (l->l_tls_modid == 0, 0))
+ if (__glibc_unlikely (l->l_tls_modid == 0))
/* This module has no TLS segment. */
return NULL;
dtv_t *dtv = THREAD_DTV ();
- if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
+ if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
{
/* This thread's DTV is not completely current,
but it might already cover this module. */
}
void *data = dtv[l->l_tls_modid].pointer.val;
- if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (data == TLS_DTV_UNALLOCATED))
/* The DTV is current, but this thread has not yet needed
to allocate this module's segment. */
data = NULL;
int result = 0;
/* Display information about what we are doing while debugging. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_VERSIONS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_VERSIONS))
_dl_debug_printf ("\
checking for version `%s' in file %s [%lu] required by file %s [%lu]\n",
string, DSO_FILENAME (map->l_name),
map->l_ns, name, ns);
- if (__builtin_expect (map->l_info[VERSYMIDX (DT_VERDEF)] == NULL, 0))
+ if (__glibc_unlikely (map->l_info[VERSYMIDX (DT_VERDEF)] == NULL))
{
/* The file has no symbol versioning. I.e., the dependent
object was linked against another version of this file. We
}
/* Symbol not found. If it was a weak reference it is not fatal. */
- if (__builtin_expect (weak, 1))
+ if (__glibc_likely (weak))
{
if (verbose)
{
section. */
map->l_versions = (struct r_found_version *)
calloc (ndx_high + 1, sizeof (*map->l_versions));
- if (__builtin_expect (map->l_versions == NULL, 0))
+ if (__glibc_unlikely (map->l_versions == NULL))
{
errstring = N_("cannot allocate version reference table");
errval = ENOMEM;
{
ElfW(Half) ndx = aux->vna_other & 0x7fff;
/* In trace mode, dependencies may be missing. */
- if (__builtin_expect (ndx < map->l_nversions, 1))
+ if (__glibc_likely (ndx < map->l_nversions))
{
map->l_versions[ndx].hash = aux->vna_hash;
map->l_versions[ndx].hidden = aux->vna_other & 0x8000;
errno when it's being used by another thread that cares about it.
Yet we must be sure not to try calling the lock functions before
the thread library is fully initialized. */
- if (__builtin_expect (INTUSE (_dl_starting_up), 0))
+ if (__glibc_unlikely (INTUSE (_dl_starting_up)))
__writev (fd, iov, niov);
else
{
lstat_buf.st_mode = DTTOIF (direntry->d_type);
else
#endif
- if (__builtin_expect (lstat64 (real_file_name, &lstat_buf), 0))
+ if (__glibc_unlikely (lstat64 (real_file_name, &lstat_buf)))
{
error (0, errno, _("Cannot lstat %s"), file_name);
continue;
continue;
}
}
- if (__builtin_expect (stat64 (target_name, &stat_buf), 0))
+ if (__glibc_unlikely (stat64 (target_name, &stat_buf)))
{
if (opt_verbose)
error (0, errno, _("Cannot stat %s"), file_name);
}
#endif
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
{
#ifndef HP_TIMING_NONAVAIL
print_statistics (&rtld_total_time);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
- if (__builtin_expect (lossage != NULL, 0))
+ if (__glibc_unlikely (lossage != NULL))
_dl_fatal_printf ("cannot set up thread-local storage: %s\n",
lossage);
#else
= TLS_INIT_TP (tcbp, 0);
#endif
- if (__builtin_expect (lossage != NULL, 0))
+ if (__glibc_unlikely (lossage != NULL))
_dl_fatal_printf ("cannot set up thread-local storage: %s\n", lossage);
tls_init_tp_called = true;
unsigned int old_nloaded = GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
(void) _dl_catch_error (&objname, &err_str, &malloced, map_doit, &args);
- if (__builtin_expect (err_str != NULL, 0))
+ if (__glibc_unlikely (err_str != NULL))
{
_dl_error_printf ("\
ERROR: ld.so: object '%s' from %s cannot be preloaded (%s): ignored.\n",
args.mode = __RTLD_OPENEXEC;
(void) _dl_catch_error (&objname, &err_str, &malloced, map_doit,
&args);
- if (__builtin_expect (err_str != NULL, 0))
+ if (__glibc_unlikely (err_str != NULL))
/* We don't free the returned string, the programs stops
anyway. */
_exit (EXIT_FAILURE);
GL(dl_rtld_map).l_tls_modid = _dl_next_tls_modid ();
/* If we have auditing DSOs to load, do it now. */
- if (__builtin_expect (audit_list != NULL, 0))
+ if (__glibc_unlikely (audit_list != NULL))
{
/* Iterate over all entries in the list. The order is important. */
struct audit_ifaces *last_audit = NULL;
bool malloced;
(void) _dl_catch_error (&objname, &err_str, &malloced, dlmopen_doit,
&dlmargs);
- if (__builtin_expect (err_str != NULL, 0))
+ if (__glibc_unlikely (err_str != NULL))
{
not_loaded:
_dl_error_printf ("\
/* If we have any auditing modules, announce that we already
have two objects loaded. */
- if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
+ if (__glibc_unlikely (GLRO(dl_naudit) > 0))
{
struct link_map *ls[2] = { main_map, &GL(dl_rtld_map) };
/* Auditing checkpoint: we are ready to signal that the initial map
is being constructed. */
- if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
+ if (__glibc_unlikely (GLRO(dl_naudit) > 0))
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
struct link_map **preloads = NULL;
unsigned int npreloads = 0;
- if (__builtin_expect (preloadlist != NULL, 0))
+ if (__glibc_unlikely (preloadlist != NULL))
{
/* The LD_PRELOAD environment variable gives list of libraries
separated by white space or colons that are loaded before the
the work but this does not matter, since it is not for production
use. */
static const char preload_file[] = "/etc/ld.so.preload";
- if (__builtin_expect (__access (preload_file, R_OK) == 0, 0))
+ if (__glibc_unlikely (__access (preload_file, R_OK) == 0))
{
/* Read the contents of the file. */
file = _dl_sysdep_read_whole_file (preload_file, &file_size,
PROT_READ | PROT_WRITE);
- if (__builtin_expect (file != MAP_FAILED, 0))
+ if (__glibc_unlikely (file != MAP_FAILED))
{
/* Parse the file. It contains names of libraries to be loaded,
separated by white spaces or `:'. It may also contain
}
}
- if (__builtin_expect (*first_preload != NULL, 0))
+ if (__glibc_unlikely (*first_preload != NULL))
{
/* Set up PRELOADS with a vector of the preloaded libraries. */
struct link_map *l = *first_preload;
break;
bool rtld_multiple_ref = false;
- if (__builtin_expect (i < main_map->l_searchlist.r_nlist, 1))
+ if (__glibc_likely (i < main_map->l_searchlist.r_nlist))
{
/* Some DT_NEEDED entry referred to the interpreter object itself, so
put it back in the list of visible objects. We insert it into the
if (tcbp == NULL)
tcbp = init_tls ();
- if (__builtin_expect (audit_list == NULL, 1))
+ if (__glibc_likely (audit_list == NULL))
/* Initialize security features. But only if we have not done it
earlier. */
security_init ();
if (r_list == r_listend && liblist == liblistend)
prelinked = true;
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS))
_dl_debug_printf ("\nprelink checking: %s\n",
prelinked ? "ok" : "failed");
}
GLRO(dl_init_all_dirs) = GL(dl_all_dirs);
/* Print scope information. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
{
_dl_debug_printf ("\nInitial object scopes\n");
this has to go here because the calls it makes should use the
rtld versions of the functions (particularly calloc()), but it
needs to have _dl_profile_map set up by the relocator. */
- if (__builtin_expect (GL(dl_profile_map) != NULL, 0))
+ if (__glibc_unlikely (GL(dl_profile_map) != NULL))
/* We must prepare the profiling. */
_dl_start_profile ();
}
#else
= TLS_INIT_TP (tcbp, 0);
#endif
- if (__builtin_expect (lossage != NULL, 0))
+ if (__glibc_unlikely (lossage != NULL))
_dl_fatal_printf ("cannot set up thread-local storage: %s\n",
lossage);
}
#ifdef SHARED
/* Auditing checkpoint: we have added all objects. */
- if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
+ if (__glibc_unlikely (GLRO(dl_naudit) > 0))
{
struct link_map *head = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
/* Do not call the functions for any auditing object. */
mapped and relocated it normally. */
struct link_map *l = _dl_new_object ((char *) "", "", lt_library, NULL,
0, LM_ID_BASE);
- if (__builtin_expect (l != NULL, 1))
+ if (__glibc_likely (l != NULL))
{
static ElfW(Dyn) dyn_temp[DL_RO_DYN_TEMP_CNT] attribute_relro;
{
/* Matches user and not yet on the list. Insert
this group. */
- if (__builtin_expect (*start == *size, 0))
+ if (__glibc_unlikely (*start == *size))
{
/* Need a bigger buffer. */
gid_t *newgroups;
char *new_buf;
buffer_size += NSS_BUFLEN_GROUP;
new_buf = realloc (buffer, buffer_size);
- if (__builtin_expect (new_buf == NULL, 0))
+ if (__glibc_unlikely (new_buf == NULL))
{
/* We are out of memory. Free the current buffer so that the
process gets a chance for a normal termination. */
long int size = MAX (1, *ngroups);
gid_t *newgroups = (gid_t *) malloc (size * sizeof (gid_t));
- if (__builtin_expect (newgroups == NULL, 0))
+ if (__glibc_unlikely (newgroups == NULL))
/* No more memory. */
// XXX This is wrong. The user provided memory, we have to use
// XXX it. The internal functions must be called with the user
size = 16;
groups = (gid_t *) malloc (size * sizeof (gid_t));
- if (__builtin_expect (groups == NULL, 0))
+ if (__glibc_unlikely (groups == NULL))
/* No more memory. */
return -1;
{
int retval;
- if (__builtin_expect (gr == NULL, 0) || __builtin_expect (stream == NULL, 0))
+ if (__glibc_unlikely (gr == NULL) || __glibc_unlikely (stream == NULL))
{
__set_errno (EINVAL);
return -1;
if (status == NSS_STATUS_SUCCESS
&& !internal_gid_in_list (groups, group, *start))
{
- if (__builtin_expect (*start == *size, 0))
+ if (__glibc_unlikely (*start == *size))
{
/* Need a bigger buffer. */
gid_t *newgroups;
error_t
_hurd_ports_use (int which, error_t (*operate) (mach_port_t))
{
- if (__builtin_expect (_hurd_ports == NULL, 0))
+ if (__glibc_unlikely (_hurd_ports == NULL))
/* This means that _hurd_init has not been called yet, which is
normally only the case in the bootstrap filesystem, and there
only in the early phases of booting. */
cache_size = st.st_size;
#ifdef _POSIX_MAPPED_FILES
gconv_cache = __mmap (NULL, cache_size, PROT_READ, MAP_SHARED, fd, 0);
- if (__builtin_expect (gconv_cache == MAP_FAILED, 0))
+ if (__glibc_unlikely (gconv_cache == MAP_FAILED))
#endif
{
size_t already_read;
struct __gconv_trans_data *curp = transp;
transp = transp->__next;
- if (__builtin_expect (curp->__trans_end_fct != NULL, 0))
+ if (__glibc_unlikely (curp->__trans_end_fct != NULL))
curp->__trans_end_fct (curp->__data);
free (curp);
errhand = strchr (toset, '/');
if (errhand != NULL)
errhand = strchr (errhand + 1, '/');
- if (__builtin_expect (errhand != NULL, 1))
+ if (__glibc_likely (errhand != NULL))
{
if (*++errhand == '\0')
errhand = NULL;
struct __gconv_trans_data *curp = transp;
transp = transp->__next;
- if (__builtin_expect (curp->__trans_end_fct != NULL, 0))
+ if (__glibc_unlikely (curp->__trans_end_fct != NULL))
curp->__trans_end_fct (curp->__data);
free (curp);
while (*inptrp < inend && cnt < 4)
state->__value.__wchb[cnt++] = *(*inptrp)++;
- if (__builtin_expect (cnt < 4, 0))
+ if (__glibc_unlikely (cnt < 4))
{
/* Still not enough bytes. Store the ones in the input buffer. */
state->__count &= ~7;
inval = *(const uint32_t *) inptr;
#endif
- if (__builtin_expect (inval > 0x7fffffff, 0))
+ if (__glibc_unlikely (inval > 0x7fffffff))
{
/* The value is too large. We don't try transliteration here since
this is not an error because of the lack of possibilities to
for (cnt = 0; cnt < n_convert; ++cnt, inptr += 4)
{
- if (__builtin_expect (inptr[0] > 0x80, 0))
+ if (__glibc_unlikely (inptr[0] > 0x80))
{
/* The value is too large. We don't try transliteration here since
this is not an error because of the lack of possibilities to
while (*inptrp < inend && cnt < 4)
state->__value.__wchb[cnt++] = *(*inptrp)++;
- if (__builtin_expect (cnt < 4, 0))
+ if (__glibc_unlikely (cnt < 4))
{
/* Still not enough bytes. Store the ones in the input buffer. */
state->__count &= ~7;
while (*inptrp < inend && cnt < 4)
state->__value.__wchb[cnt++] = *(*inptrp)++;
- if (__builtin_expect (cnt < 4, 0))
+ if (__glibc_unlikely (cnt < 4))
{
/* Still not enough bytes. Store the ones in the input buffer. */
state->__count &= ~7;
inval = *(const uint32_t *) inptr;
#endif
- if (__builtin_expect (inval > 0x7fffffff, 0))
+ if (__glibc_unlikely (inval > 0x7fffffff))
{
/* The value is too large. We don't try transliteration here since
this is not an error because of the lack of possibilities to
for (cnt = 0; cnt < n_convert; ++cnt, inptr += 4)
{
- if (__builtin_expect (inptr[3] > 0x80, 0))
+ if (__glibc_unlikely (inptr[3] > 0x80))
{
/* The value is too large. We don't try transliteration here since
this is not an error because of the lack of possibilities to
while (*inptrp < inend && cnt < 4)
state->__value.__wchb[cnt++] = *(*inptrp)++;
- if (__builtin_expect (cnt < 4, 0))
+ if (__glibc_unlikely (cnt < 4))
{
/* Still not enough bytes. Store the ones in the input buffer. */
state->__count &= ~7;
#define LOOPFCT FROM_LOOP
#define BODY \
{ \
- if (__builtin_expect (*inptr > '\x7f', 0)) \
+ if (__glibc_unlikely (*inptr > '\x7f')) \
{ \
/* The value is too large. We don't try transliteration here since \
this is not an error because of the lack of possibilities to \
#define LOOPFCT FROM_LOOP
#define BODY \
{ \
- if (__builtin_expect (*((const uint32_t *) inptr) > 0x7f, 0)) \
+ if (__glibc_unlikely (*((const uint32_t *) inptr) > 0x7f)) \
{ \
UNICODE_TAG_HANDLER (*((const uint32_t *) inptr), 4); \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
{ \
uint32_t wc = *((const uint32_t *) inptr); \
\
- if (__builtin_expect (wc < 0x80, 1)) \
+ if (__glibc_likely (wc < 0x80)) \
/* It's an one byte sequence. */ \
*outptr++ = (unsigned char) wc; \
- else if (__builtin_expect (wc <= 0x7fffffff, 1)) \
+ else if (__glibc_likely (wc <= 0x7fffffff)) \
{ \
size_t step; \
unsigned char *start; \
if ((wc & (~(uint32_t)0 << (5 * step + 1))) == 0) \
break; \
\
- if (__builtin_expect (outptr + step > outend, 0)) \
+ if (__glibc_unlikely (outptr + step > outend)) \
{ \
/* Too long. */ \
result = __GCONV_FULL_OUTPUT; \
/* Next input byte. */ \
uint32_t ch = *inptr; \
\
- if (__builtin_expect (ch < 0x80, 1)) \
+ if (__glibc_likely (ch < 0x80)) \
{ \
/* One byte sequence. */ \
++inptr; \
cnt = 2; \
ch &= 0x1f; \
} \
- else if (__builtin_expect ((ch & 0xf0) == 0xe0, 1)) \
+ else if (__glibc_likely ((ch & 0xf0) == 0xe0)) \
{ \
/* We expect three bytes. */ \
cnt = 3; \
ch &= 0x0f; \
} \
- else if (__builtin_expect ((ch & 0xf8) == 0xf0, 1)) \
+ else if (__glibc_likely ((ch & 0xf8) == 0xf0)) \
{ \
/* We expect four bytes. */ \
cnt = 4; \
ch &= 0x07; \
} \
- else if (__builtin_expect ((ch & 0xfc) == 0xf8, 1)) \
+ else if (__glibc_likely ((ch & 0xfc) == 0xf8)) \
{ \
/* We expect five bytes. */ \
cnt = 5; \
ch &= 0x03; \
} \
- else if (__builtin_expect ((ch & 0xfe) == 0xfc, 1)) \
+ else if (__glibc_likely ((ch & 0xfe) == 0xfc)) \
{ \
/* We expect six bytes. */ \
cnt = 6; \
STANDARD_FROM_LOOP_ERR_HANDLER (i); \
} \
\
- if (__builtin_expect (inptr + cnt > inend, 0)) \
+ if (__glibc_unlikely (inptr + cnt > inend)) \
{ \
/* We don't have enough input. But before we report that check \
that all the bytes are correct. */ \
if ((inptr[i] & 0xc0) != 0x80) \
break; \
\
- if (__builtin_expect (inptr + i == inend, 1)) \
+ if (__glibc_likely (inptr + i == inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
cnt = 2; \
ch &= 0x1f; \
} \
- else if (__builtin_expect ((ch & 0xf0) == 0xe0, 1)) \
+ else if (__glibc_likely ((ch & 0xf0) == 0xe0)) \
{ \
/* We expect three bytes. */ \
cnt = 3; \
ch &= 0x0f; \
} \
- else if (__builtin_expect ((ch & 0xf8) == 0xf0, 1)) \
+ else if (__glibc_likely ((ch & 0xf8) == 0xf0)) \
{ \
/* We expect four bytes. */ \
cnt = 4; \
ch &= 0x07; \
} \
- else if (__builtin_expect ((ch & 0xfc) == 0xf8, 1)) \
+ else if (__glibc_likely ((ch & 0xfc) == 0xf8)) \
{ \
/* We expect five bytes. */ \
cnt = 5; \
{ \
uint16_t u1 = get16 (inptr); \
\
- if (__builtin_expect (u1 >= 0xd800 && u1 < 0xe000, 0)) \
+ if (__glibc_unlikely (u1 >= 0xd800 && u1 < 0xe000)) \
{ \
/* Surrogate characters in UCS-2 input are not valid. Reject \
them. (Catching this here is not security relevant.) */ \
{ \
uint32_t val = *((const uint32_t *) inptr); \
\
- if (__builtin_expect (val >= 0x10000, 0)) \
+ if (__glibc_unlikely (val >= 0x10000)) \
{ \
UNICODE_TAG_HANDLER (val, 4); \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
- else if (__builtin_expect (val >= 0xd800 && val < 0xe000, 0)) \
+ else if (__glibc_unlikely (val >= 0xd800 && val < 0xe000)) \
{ \
/* Surrogate characters in UCS-4 input are not valid. \
We must catch this, because the UCS-2 output might be \
{ \
uint16_t u1 = bswap_16 (get16 (inptr)); \
\
- if (__builtin_expect (u1 >= 0xd800 && u1 < 0xe000, 0)) \
+ if (__glibc_unlikely (u1 >= 0xd800 && u1 < 0xe000)) \
{ \
/* Surrogate characters in UCS-2 input are not valid. Reject \
them. (Catching this here is not security relevant.) */ \
#define BODY \
{ \
uint32_t val = *((const uint32_t *) inptr); \
- if (__builtin_expect (val >= 0x10000, 0)) \
+ if (__glibc_unlikely (val >= 0x10000)) \
{ \
UNICODE_TAG_HANDLER (val, 4); \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
- else if (__builtin_expect (val >= 0xd800 && val < 0xe000, 0)) \
+ else if (__glibc_unlikely (val >= 0xd800 && val < 0xe000)) \
{ \
/* Surrogate characters in UCS-4 input are not valid. \
We must catch this, because the UCS-2 output might be \
size_t irreversible;
int result;
- if (__builtin_expect (inbuf == NULL || *inbuf == NULL, 0))
+ if (__glibc_unlikely (inbuf == NULL || *inbuf == NULL))
{
if (outbuf == NULL || *outbuf == NULL)
result = __gconv (gcd, NULL, NULL, NULL, NULL, &irreversible);
int
iconv_close (iconv_t cd)
{
- if (__builtin_expect (cd == (iconv_t *) -1L, 0))
+ if (__glibc_unlikely (cd == (iconv_t *) -1L))
{
__set_errno (EBADF);
return -1;
/* If any of them recognized the input continue with the loop. */ \
if (result != __GCONV_ILLEGAL_INPUT) \
{ \
- if (__builtin_expect (result == __GCONV_FULL_OUTPUT, 0)) \
+ if (__glibc_unlikely (result == __GCONV_FULL_OUTPUT)) \
break; \
\
continue; \
bytes from the state and at least one more, or the character is still
incomplete, or we have some other error (like illegal input character,
no space in output buffer). */
- if (__builtin_expect (inptr != bytebuf, 1))
+ if (__glibc_likely (inptr != bytebuf))
{
/* We found a new character. */
assert (inptr - bytebuf > (state->__count & 7));
/* If the function is called with no input this means we have to reset
to the initial state. The possibly partly converted input is
dropped. */
- if (__builtin_expect (do_flush, 0))
+ if (__glibc_unlikely (do_flush))
{
/* This should never happen during error handling. */
assert (outbufstart == NULL);
if (result != __GCONV_EMPTY_INPUT)
{
- if (__builtin_expect (outerr != outbuf, 0))
+ if (__glibc_unlikely (outerr != outbuf))
{
/* We have a problem. Undo the conversion. */
outbuf = outstart;
SAVE_RESET_STATE (1);
#endif
- if (__builtin_expect (!unaligned, 1))
+ if (__glibc_likely (!unaligned))
{
if (FROM_DIRECTION)
/* Run the conversion loop. */
/* If we were called as part of an error handling module we
don't do anything else here. */
- if (__builtin_expect (outbufstart != NULL, 0))
+ if (__glibc_unlikely (outbufstart != NULL))
{
*outbufstart = outbuf;
return status;
/* If this is the last step leave the loop, there is nothing
we can do. */
- if (__builtin_expect (data->__flags & __GCONV_IS_LAST, 0))
+ if (__glibc_unlikely (data->__flags & __GCONV_IS_LAST))
{
/* Store information about how many bytes are available. */
data->__outbuf = outbuf;
}
/* Write out all output which was produced. */
- if (__builtin_expect (outbuf > outstart, 1))
+ if (__glibc_likely (outbuf > outstart))
{
const unsigned char *outerr = data->__outbuf;
int result;
if (result != __GCONV_EMPTY_INPUT)
{
- if (__builtin_expect (outerr != outbuf, 0))
+ if (__glibc_unlikely (outerr != outbuf))
{
#ifdef RESET_INPUT_BUFFER
RESET_INPUT_BUFFER;
SAVE_RESET_STATE (0);
# endif
- if (__builtin_expect (!unaligned, 1))
+ if (__glibc_likely (!unaligned))
{
if (FROM_DIRECTION)
/* Run the conversion loop. */
/* If we haven't consumed a single byte decrement
the invocation counter. */
- if (__builtin_expect (outbuf == outstart, 0))
+ if (__glibc_unlikely (outbuf == outstart))
--data->__invocation_counter;
#endif /* reset input buffer */
}
uint32_t ch = get32 (inptr); \
unsigned char res; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
rp = NULL; \
/* Now test for a possible second byte and write this if possible. */ \
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
--outptr; \
uint32_t ch2; \
int idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
ch = big5_to_ucs[idx]; \
\
/* Is this character defined? */ \
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
{ \
if (FROM_DIRECTION) \
{ \
- if (__builtin_expect (outbuf + 4 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 4 <= outend)) \
{ \
/* Write out the last character. */ \
*((uint32_t *) outbuf) = data->__statep->__count >> 3; \
} \
else \
{ \
- if (__builtin_expect (outbuf + 2 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 2 <= outend)) \
{ \
/* Write out the last character. */ \
uint32_t lasttwo = data->__statep->__count >> 3; \
\
/* Determine whether there is a buffered character pending. */ \
ch = *statep >> 3; \
- if (__builtin_expect (ch == 0, 1)) \
+ if (__glibc_likely (ch == 0)) \
{ \
/* No - so look at the next input byte. */ \
ch = *inptr; \
uint32_t ch2; \
int idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
\
inptr += 2; \
} \
- else if (__builtin_expect (ch == 0xff, 0)) \
+ else if (__glibc_unlikely (ch == 0xff)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
} \
goto not_combining; \
\
/* Output the combined character. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
not_combining: \
/* Output the buffered character. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
else \
{ \
/* Check for possible combining character. */ \
- if (__builtin_expect (ch == 0xca || ch == 0xea, 0)) \
+ if (__glibc_unlikely (ch == 0xca || ch == 0xea)) \
{ \
*statep = ((cp[0] << 8) | cp[1]) << 3; \
inptr += 4; \
} \
\
*outptr++ = cp[0]; \
- if (__builtin_expect (cp[1] != '\0', 1)) \
+ if (__glibc_likely (cp[1] != '\0')) \
*outptr++ = cp[1]; \
} \
} \
{ \
if (FROM_DIRECTION) \
{ \
- if (__builtin_expect (outbuf + 4 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 4 <= outend)) \
{ \
/* Write out the last character. */ \
*((uint32_t *) outbuf) = data->__statep->__count >> 3; \
if (ch >= 0x80) \
{ \
ch = to_ucs4[ch - 0x80]; \
- if (__builtin_expect (ch == L'\0', 0)) \
+ if (__glibc_unlikely (ch == L'\0')) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
res = 0; \
} \
\
- if (__builtin_expect (res != 0, 1)) \
+ if (__glibc_likely (res != 0)) \
{ \
*outptr++ = res; \
inptr += 4; \
if (decomp_table[i].comb2 < 0) \
{ \
/* See whether we have room for two bytes. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
else \
{ \
/* See whether we have room for three bytes. */ \
- if (__builtin_expect (outptr + 2 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (FROM_DIRECTION) \
{ \
- if (__builtin_expect (outbuf + 4 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 4 <= outend)) \
{ \
/* Write out the last character. */ \
*((uint32_t *) outbuf) = data->__statep->__count >> 3; \
if (ch >= 0x80) \
{ \
ch = to_ucs4[ch - 0x80]; \
- if (__builtin_expect (ch == L'\0', 0)) \
+ if (__glibc_unlikely (ch == L'\0')) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
res = 0; \
} \
\
- if (__builtin_expect (res != 0, 1)) \
+ if (__glibc_likely (res != 0)) \
{ \
*outptr++ = res; \
inptr += 4; \
} \
\
/* See whether we have room for two bytes. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
uint32_t ch2; \
uint_fast32_t idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
/* Now test for a possible second byte and write this if possible. */\
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
result = __GCONV_FULL_OUTPUT; \
next byte is also available. */ \
const unsigned char *endp; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
ch = inptr[1]; \
\
/* All second bytes of a multibyte character must be >= 0xa1. */ \
- if (__builtin_expect (ch < 0xa1, 0)) \
+ if (__glibc_unlikely (ch < 0xa1)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
/* This is code set 1: GB 2312-80. */ \
endp = inptr; \
\
ch = gb2312_to_ucs4 (&endp, 2, 0x80); \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
{ \
if (FROM_DIRECTION) \
{ \
- if (__builtin_expect (outbuf + 4 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 4 <= outend)) \
{ \
/* Write out the last character. */ \
*((uint32_t *) outbuf) = data->__statep->__count >> 3; \
} \
else \
{ \
- if (__builtin_expect (outbuf + 2 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 2 <= outend)) \
{ \
/* Write out the last character. */ \
uint32_t lasttwo = data->__statep->__count >> 3; \
\
/* Determine whether there is a buffered character pending. */ \
ch = *statep >> 3; \
- if (__builtin_expect (ch == 0, 1)) \
+ if (__glibc_likely (ch == 0)) \
{ \
/* No - so look at the next input byte. */ \
ch = *inptr; \
/* Two or three byte character. */ \
uint32_t ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second byte is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
ch2 = inptr[1]; \
\
/* The second byte must be >= 0xa1 and <= 0xfe. */ \
- if (__builtin_expect (ch2 < 0xa1 || ch2 > 0xfe, 0)) \
+ if (__glibc_unlikely (ch2 < 0xa1 || ch2 > 0xfe)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
if (ch == 0x8e) \
{ \
/* Half-width katakana. */ \
- if (__builtin_expect (ch2 > 0xdf, 0)) \
+ if (__glibc_unlikely (ch2 > 0xdf)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
ch = ch2 + 0xfec0; \
/* JISX 0213 plane 2. */ \
uint32_t ch3; \
\
- if (__builtin_expect (inptr + 2 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 2 >= inend)) \
{ \
/* The third byte is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
if (len > 0) \
{ \
/* Output the combined character. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
not_combining: \
/* Output the buffered character. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
else if (ch >= 0xff61 && ch <= 0xff9f) \
{ \
/* Half-width katakana. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
if (jch & 0x8000) \
{ \
/* JISX 0213 plane 2. */ \
- if (__builtin_expect (outptr + 2 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
else \
{ \
/* JISX 0213 plane 1. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
character is also available. */ \
unsigned char ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
ch2 = (unsigned char)inptr[1]; \
\
/* All second bytes of a multibyte character must be >= 0xa1. */ \
- if (__builtin_expect (ch2 < 0xa1, 0)) \
+ if (__glibc_unlikely (ch2 < 0xa1)) \
{ \
/* This is an illegal character. */ \
if (! ignore_errors_p ()) \
/* This is code set 2: half-width katakana. */ \
ch = jisx0201_to_ucs4 (ch2); \
/*if (__builtin_expect (ch, 0) == __UNKNOWN_10646_CHAR)*/ \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
/* Illegal character. */ \
if (! ignore_errors_p ()) \
/* Now test for a possible second byte and write this if possible. */\
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
- if (__builtin_expect (cp[1] < 0x80, 0)) \
+ if (__glibc_unlikely (cp[1] < 0x80)) \
{ \
- if (__builtin_expect (outptr + 2 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
result = __GCONV_FULL_OUTPUT; \
byte is also available. */ \
int ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second byte is not available. Store the \
intermediate result. */ \
ch2 = inptr[1]; \
\
/* All second bytes of a multibyte character must be >= 0xa1. */ \
- if (__builtin_expect (ch2 < 0xa1, 0)) \
+ if (__glibc_unlikely (ch2 < 0xa1)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
if (ch == 0x8e) \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
} \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
/* Illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
size_t found; \
\
/* See whether we have room for at least two characters. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
/* Two-byte character. First test whether the next byte \
is also available. */ \
ch = ksc5601_to_ucs4 (&inptr, inend - inptr, 0x80); \
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* The second byte is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
} \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
} \
/* Now test for a possible second byte and write this if possible. */ \
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
--outptr; \
\
inptr += 4; \
} \
- else if (__builtin_expect (ch2 >= 0x40, 1)) \
+ else if (__glibc_likely (ch2 >= 0x40)) \
{ \
/* A two-byte character */ \
idx = (ch - 0x81) * 192 + (ch2 - 0x40); \
{ \
/* See whether there is enough room for all four bytes we \
write. */ \
- if (__builtin_expect (outptr + 3 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 >= outend)) \
{ \
/* We have not enough room. */ \
result = __GCONV_FULL_OUTPUT; \
const char *cp; \
int idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
ch = inptr[1]; \
\
/* All second bytes of a multibyte character must be >= 0xa1. */ \
- if (__builtin_expect (ch < 0xa1, 0)) \
+ if (__glibc_unlikely (ch < 0xa1)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
/* Get the value from the table. */ \
cp = __from_gb2312_to_big5[idx]; \
- if (__builtin_expect (cp[0] == '\0', 0)) \
+ if (__glibc_unlikely (cp[0] == '\0')) \
{ \
/* We do not have a mapping for this character. \
If ignore errors, map it to 0xa1bc - big5 box character */ \
break; \
\
/* See if there is enough room to write the second byte. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
const char *cp; \
int idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
\
/* Get the value from the table. */ \
cp = __from_big5_to_gb2312 [idx]; \
- if (__builtin_expect (cp[0] == '\0', 0)) \
+ if (__glibc_unlikely (cp[0] == '\0')) \
{ \
/* We do not have a mapping for this character. \
If ignore errors, map it to 0xa1f5 - gb box character */ \
break; \
\
/* See if there is enough room to write the second byte. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
UCS4 -> GB2312 -> GBK -> UCS4 \
\
might not produce identical text. */ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
break; \
} \
\
- if (__builtin_expect (outend - outptr < 2, 0)) \
+ if (__glibc_unlikely (outend - outptr < 2)) \
{ \
/* We ran out of space. */ \
result = __GCONV_FULL_OUTPUT; \
ch = (ch << 8) | inptr[1]; \
\
/* Map 0xA844 (U2015 in GBK) to 0xA1AA (U2015 in GB2312). */ \
- if (__builtin_expect (ch == 0xa844, 0)) \
+ if (__glibc_unlikely (ch == 0xa844)) \
ch = 0xa1aa; \
\
/* Now determine whether the character is valid. */ \
\
if (ch > 0x7f) \
{ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
break; \
} \
\
- if (__builtin_expect (outend - outptr < 2, 0)) \
+ if (__glibc_unlikely (outend - outptr < 2)) \
{ \
/* We ran out of space. */ \
result = __GCONV_FULL_OUTPUT; \
uint32_t ch2; \
int idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf >= outend, 0)) \
+ if (__glibc_unlikely (outbuf >= outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
else \
{ \
/* This is a combined character. Make sure we have room. */ \
- if (__builtin_expect (outptr + 8 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 8 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
assert (curcs == db); \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
uint32_t ch = get32 (inptr); \
\
- if (__builtin_expect (ch >= UCS_LIMIT, 0)) \
+ if (__glibc_unlikely (ch >= UCS_LIMIT)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
else \
{ \
- if (__builtin_expect (curcs == db, 0)) \
+ if (__glibc_unlikely (curcs == db)) \
{ \
/* We know there is room for at least one byte. */ \
*outptr++ = SI; \
curcs = sb; \
\
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf >= outend, 0)) \
+ if (__glibc_unlikely (outbuf >= outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
\
assert (curcs == db); \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
const struct gap *rp2 = __ucs4_to_ibm930db_idx; \
const char *cp; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
{ \
if (curcs == sb) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (curcs == db) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = SI; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
if (__builtin_expect (res == 0, 0) && ch != 0) \
{ \
/* Use the IBM932 table for double byte. */ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. \
Store the intermediate result. */ \
uint32_t high; \
uint16_t pccode; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
rp = NULL; \
high = (sizeof (__ucs4_to_ibm932db) >> 1) \
/ sizeof (__ucs4_to_ibm932db[0][FROM]); \
pccode = ch; \
- if (__builtin_expect (rp != NULL, 1)) \
+ if (__glibc_likely (rp != NULL)) \
while (low < high) \
{ \
i = (low + high) >> 1; \
} \
if (found) \
{ \
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
else \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf >= outend, 0)) \
+ if (__glibc_unlikely (outbuf >= outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
assert (curcs == db); \
\
/* Use the IBM933 table for double byte. */ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
const struct gap *rp2 = __ucs4_to_ibm933db_idx; \
const char *cp; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
{ \
if (curcs == sb) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (curcs == db) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = SI; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf >= outend, 0)) \
+ if (__glibc_unlikely (outbuf >= outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
assert (curcs == db); \
\
/* Use the IBM935 table for double byte. */ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. \
Store the intermediate result. */ \
const struct gap *rp2 = __ucs4_to_ibm935db_idx; \
const char *cp; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
{ \
if (curcs == sb) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (curcs == db) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = SI; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf >= outend, 0)) \
+ if (__glibc_unlikely (outbuf >= outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
assert (curcs == db); \
\
/* Use the IBM937 table for double byte. */ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. \
Store the intermediate result. */ \
const struct gap *rp2 = __ucs4_to_ibm937db_idx; \
const char *cp; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
{ \
if (curcs == sb) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (curcs == db) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = SI; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf >= outend, 0)) \
+ if (__glibc_unlikely (outbuf >= outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
\
assert (curcs == db); \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
const struct gap *rp2 = __ucs4_to_ibm939db_idx; \
const char *cp; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
goto ibm939_invalid_char; \
{ \
if (curcs == sb) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
curcs = db; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (curcs == db) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = SI; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
__builtin_expect (res == 0, 0) && ch != 0)) \
{ \
/* Use the IBM943 table for double byte. */ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. \
Store the intermediate result. */ \
uint32_t high; \
uint16_t pccode; \
\
- if (__builtin_expect (ch >= 0xffff, 0)) \
+ if (__glibc_unlikely (ch >= 0xffff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
rp = NULL; \
high = (sizeof (__ucs4_to_ibm943db) >> 1) \
/ sizeof (__ucs4_to_ibm943db[0][FROM]); \
pccode = ch; \
- if (__builtin_expect (rp != NULL, 1)) \
+ if (__glibc_likely (rp != NULL)) \
while (low < high) \
{ \
i = (low + high) >> 1; \
} \
if (found) \
{ \
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
else \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf == outend, 0)) \
+ if (__glibc_unlikely (outbuf == outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf == outend, 0)) \
+ if (__glibc_unlikely (outbuf == outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
uint32_t ch = *inptr; \
\
/* This is a 7bit character set, disallow all 8bit characters. */ \
- if (__builtin_expect (ch >= 0x7f, 0)) \
+ if (__glibc_unlikely (ch >= 0x7f)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
/* Recognize escape sequences. */ \
{ \
*outptr++ = SI; \
set = ASCII_set; \
- if (__builtin_expect (outptr == outend, 0)) \
+ if (__glibc_unlikely (outptr == outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
const char *escseq; \
\
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
if (used == CNS11643_2_set) \
{ \
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
SO charset. */ \
if (set == ASCII_set) \
{ \
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
/* Always test the length here since we have used up all the \
guaranteed output buffer slots. */ \
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
} \
- else if (__builtin_expect (outptr + 2 > outend, 0)) \
+ else if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
+ ((data->__statep->__count & CURRENT_SEL_MASK) != ASCII_set \
? 3 : 0); \
\
- if (__builtin_expect (outbuf + need > outend, 0)) \
+ if (__glibc_unlikely (outbuf + need > outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
uint32_t ch = *inptr; \
\
/* Recognize escape sequences. */ \
- if (__builtin_expect (ch == ESC, 0)) \
+ if (__glibc_unlikely (ch == ESC)) \
{ \
/* We now must be prepared to read two to three more bytes. \
If we have a match in the first byte but then the input buffer \
{ \
/* Use the JIS X 0201 table. */ \
ch = jisx0201_to_ucs4 (ch); \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
} \
{ \
/* Use the JIS X 0201 table. */ \
ch = jisx0201_to_ucs4 (ch + 0x80); \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
} \
provide the appropriate tables. */ \
ch = jisx0208_to_ucs4 (&inptr, inend - inptr, 0); \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
} \
- else if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ else if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
} \
} \
else /* (set == JISX0213_1_2004_set || set == JISX0213_2_set) */ \
{ \
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
|| (set != JISX0213_1_2000_set && set != JISX0213_1_2004_set) \
? 4 : 0); \
\
- if (__builtin_expect (outptr + need + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + need + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
size_t need = (lasttwo >> 16 ? 3 : 0); \
\
- if (__builtin_expect (outptr + need + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + need + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
inptr += 4; \
continue; \
} \
- if (__builtin_expect (written == 0, 0)) \
+ if (__glibc_unlikely (written == 0)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
continue; \
} \
\
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We must encode using ASCII. First write out the escape \
sequence. */ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = 'B'; \
set = ASCII_set; \
\
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (set != JISX0201_Roman_set) \
{ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
set = JISX0201_Roman_set; \
} \
\
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
if (set != JISX0208_1983_set) \
{ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
set = JISX0208_1983_set; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
if (set != new_set) \
{ \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
continue; \
} \
\
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
set = JISX0201_Kana_set; \
} \
\
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit the sequence `Esc ( B'. */ \
- if (__builtin_expect (outbuf + 3 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 3 > outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
{ \
/* Use the JIS X 0201 table. */ \
ch = jisx0201_to_ucs4 (ch); \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
++inptr; \
} \
{ \
/* Use the JIS X 0201 table. */ \
ch = jisx0201_to_ucs4 (ch + 0x80); \
- if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
++inptr; \
} \
ch = ksc5601_to_ucs4 (&inptr, inend - inptr, 0); \
} \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
} \
- else if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ else if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
} \
if (var == iso2022jp2) \
{ \
/* Handle Unicode tag characters (range U+E0000..U+E007F). */ \
- if (__builtin_expect ((ch >> 7) == (0xe0000 >> 7), 0)) \
+ if (__glibc_unlikely ((ch >> 7) == (0xe0000 >> 7))) \
{ \
ch &= 0x7f; \
if (ch >= 'A' && ch <= 'Z') \
\
/* Non-tag characters reset the tag parsing state, if the current \
state is a temporary state. */ \
- if (__builtin_expect (tag >= TAG_language, 0)) \
+ if (__glibc_unlikely (tag >= TAG_language)) \
tag = TAG_none; \
} \
\
else \
written = __UNKNOWN_10646_CHAR; \
\
- if (__builtin_expect (written == 0, 0)) \
+ if (__glibc_unlikely (written == 0)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
if (ch >= 0x80 && ch <= 0xff) \
{ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
else if (set2 == ISO88597_set) \
{ \
- if (__builtin_expect (ch < 0xffff, 1)) \
+ if (__glibc_likely (ch < 0xffff)) \
{ \
const struct gap *rp = from_idx; \
\
iso88597_from_ucs4[ch - 0xa0 + rp->idx]; \
if (res != '\0') \
{ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We must encode using ASCII. First write out the \
escape sequence. */ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
*outptr++ = 'B'; \
set = ASCII_set; \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
set2 = ISO88591_set; \
} \
\
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
} \
\
/* Try ISO 8859-7 upper half. */ \
- if (__builtin_expect (ch < 0xffff, 1)) \
+ if (__glibc_likely (ch < 0xffff)) \
{ \
const struct gap *rp = from_idx; \
\
set = JISX0201_Roman_set; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
set = JISX0208_1983_set; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
break; \
} \
\
- if (__builtin_expect (var == iso2022jp, 0)) \
+ if (__glibc_unlikely (var == iso2022jp)) \
/* Don't use the other Japanese character sets. */ \
break; \
\
set = JISX0212_set; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
set = GB2312_set; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
set = KSC5601_set; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
set = JISX0201_Kana_set; \
} \
\
- if (__builtin_expect (outptr + 1 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 > outend)) \
{ \
res = __GCONV_FULL_OUTPUT; \
break; \
{ \
/* We are not in the initial state. To switch back we have \
to emit `SI'. */ \
- if (__builtin_expect (outbuf == outend, 0)) \
+ if (__glibc_unlikely (outbuf == outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
uint32_t ch = *inptr; \
\
/* This is a 7bit character set, disallow all 8bit characters. */ \
- if (__builtin_expect (ch > 0x7f, 0)) \
+ if (__glibc_unlikely (ch > 0x7f)) \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
\
/* Recognize escape sequences. */ \
/* Use the KSC 5601 table. */ \
ch = ksc5601_to_ucs4 (&inptr, inend - inptr, 0); \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
} \
- else if (__builtin_expect (ch == __UNKNOWN_10646_CHAR, 0)) \
+ else if (__glibc_unlikely (ch == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
} \
{ \
*outptr++ = SI; \
set = ASCII_set; \
- if (__builtin_expect (outptr == outend, 0)) \
+ if (__glibc_unlikely (outptr == outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
set = KSC5601_set; \
} \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
ch = 0x5d; \
break; \
default: \
- if (__builtin_expect (ch > 0x7f, 0)) \
+ if (__glibc_unlikely (ch > 0x7f)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
failure = __GCONV_ILLEGAL_INPUT; \
#define BODY \
{ \
uint32_t ch = *((const uint32_t *) inptr); \
- if (__builtin_expect (ch > 0xff, 0)) \
+ if (__glibc_unlikely (ch > 0xff)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
#define BODY \
{ \
uint32_t ch = *((const uint32_t *) inptr); \
- if (__builtin_expect ((ch & 0xffffff00u) != BRAILLE_UCS_BASE, 0)) \
+ if (__glibc_unlikely ((ch & 0xffffff00u) != BRAILLE_UCS_BASE)) \
{ \
UNICODE_TAG_HANDLER (ch, 4); \
\
is also available. */ \
int ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
\
ch = to_ucs4_comb[ch - 0xc1][ch2 - 0x20]; \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* Illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
/* Now test for a possible second byte and write this if possible. */ \
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
--outptr; \
is also available. */ \
int ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
\
ch = to_ucs4_comb[ch - 0xc1][ch2 - 0x20]; \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* Illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
fail = 1; \
} \
\
- if (__builtin_expect (fail, 0)) \
+ if (__glibc_unlikely (fail)) \
{ \
/* Illegal characters. */ \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
/* Now test for a possible second byte and write this if possible. */ \
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
--outptr; \
uint32_t ch2; \
uint_fast32_t idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store the \
intermediate result. */ \
\
ch2 = inptr[1]; \
idx = ch * 256 + ch2; \
- if (__builtin_expect (ch <= 0xd3, 1)) \
+ if (__glibc_likely (ch <= 0xd3)) \
{ \
/* Hangul */ \
int_fast32_t i, m, f; \
} \
} \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
{ \
if (ch >= 0xac00 && ch <= 0xd7a3) \
{ \
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
ch = jamo_from_ucs_table[ch - 0x3131]; \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
- if (__builtin_expect (written == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (written == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
{ \
if (FROM_DIRECTION) \
{ \
- if (__builtin_expect (outbuf + 4 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 4 <= outend)) \
{ \
/* Write out the last character. */ \
*((uint32_t *) outbuf) = data->__statep->__count >> 3; \
} \
else \
{ \
- if (__builtin_expect (outbuf + 2 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 2 <= outend)) \
{ \
/* Write out the last character. */ \
uint32_t lasttwo = data->__statep->__count >> 3; \
\
/* Determine whether there is a buffered character pending. */ \
ch = *statep >> 3; \
- if (__builtin_expect (ch == 0, 1)) \
+ if (__glibc_likely (ch == 0)) \
{ \
/* No - so look at the next input byte. */ \
ch = *inptr; \
if (ch < 0x80) \
{ \
/* Plain ISO646-JP character. */ \
- if (__builtin_expect (ch == 0x5c, 0)) \
+ if (__glibc_unlikely (ch == 0x5c)) \
ch = 0xa5; \
- else if (__builtin_expect (ch == 0x7e, 0)) \
+ else if (__glibc_unlikely (ch == 0x7e)) \
ch = 0x203e; \
++inptr; \
} \
/* Two byte character. */ \
uint32_t ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second byte is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
ch2 = inptr[1]; \
\
/* The second byte must be in the range 0x{40..7E,80..FC}. */ \
- if (__builtin_expect (ch2 < 0x40 || ch2 == 0x7f || ch2 > 0xfc, 0))\
+ if (__glibc_unlikely (ch2 < 0x40 || ch2 == 0x7f || ch2 > 0xfc)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
if (len > 0) \
{ \
/* Output the combined character. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
\
not_combining: \
/* Output the buffered character. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
\
/* Output the shifted representation. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
uint32_t ch2; \
uint_fast32_t idx; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second byte is not available. Store \
the intermediate result. */ \
\
ch2 = inptr[1]; \
idx = ch * 256 + ch2; \
- if (__builtin_expect (ch2 < 0x40, 0)) \
+ if (__glibc_unlikely (ch2 < 0x40)) \
{ \
/* This is illegal. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (1); \
else \
ch = cjk_block4[(ch - 0xe0) * 192 + ch2 - 0x40]; \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
/* Now test for a possible second byte and write this if possible. */\
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
result = __GCONV_FULL_OUTPUT; \
is also available. */ \
uint32_t ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
/* Now test for a possible second byte and write this if possible. */ \
if (cp[1] != '\0') \
{ \
- if (__builtin_expect (outptr >= outend, 0)) \
+ if (__glibc_unlikely (outptr >= outend)) \
{ \
/* The result does not fit into the buffer. */ \
--outptr; \
{ \
if (FROM_DIRECTION) \
{ \
- if (__builtin_expect (outbuf + 4 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 4 <= outend)) \
{ \
/* Write out the last character. */ \
*((uint32_t *) outbuf) = data->__statep->__count >> 3; \
res = 0; \
} \
\
- if (__builtin_expect (res != 0, 1)) \
+ if (__glibc_likely (res != 0)) \
{ \
*outptr++ = res; \
inptr += 4; \
} \
\
/* See whether we have room for two bytes. */ \
- if (__builtin_expect (outptr + 1 >= outend, 0)) \
+ if (__glibc_unlikely (outptr + 1 >= outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
do \
{ \
- if (__builtin_expect (outbuf + 4 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 4 > outend)) \
{ \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
{ \
uint32_t last = data->__statep->__count >> 3; \
- if (__builtin_expect (last >> 8, 0)) \
+ if (__glibc_unlikely (last >> 8)) \
{ \
/* Write out the last character, two bytes. */ \
- if (__builtin_expect (outbuf + 2 <= outend, 1)) \
+ if (__glibc_likely (outbuf + 2 <= outend)) \
{ \
*outbuf++ = last & 0xff; \
*outbuf++ = (last >> 8) & 0xff; \
else \
{ \
/* Write out the last character, a single byte. */ \
- if (__builtin_expect (outbuf < outend, 1)) \
+ if (__glibc_likely (outbuf < outend)) \
{ \
*outbuf++ = last & 0xff; \
data->__statep->__count = 0; \
/* See whether we have room for two characters. Otherwise \
store only the first character now, and put the second \
one into the queue. */ \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = u2 << 8; \
result = __GCONV_FULL_OUTPUT; \
inptr++; \
put32 (outptr, 0x0BB8); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BCD << 8) + (4 << 4); \
result = __GCONV_FULL_OUTPUT; \
} \
put32 (outptr, 0x0BCD); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BB0 << 8) + (2 << 4); \
result = __GCONV_FULL_OUTPUT; \
} \
put32 (outptr, 0x0BB0); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BC0 << 8); \
result = __GCONV_FULL_OUTPUT; \
inptr++; \
put32 (outptr, 0x0B95); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BCD << 8) + (1 << 4); \
result = __GCONV_FULL_OUTPUT; \
} \
put32 (outptr, 0x0BCD); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BB7 << 8); \
result = __GCONV_FULL_OUTPUT; \
inptr++; \
put32 (outptr, 0x0B95); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BCD << 8) + (5 << 4); \
result = __GCONV_FULL_OUTPUT; \
} \
put32 (outptr, 0x0BCD); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BB7 << 8) + (3 << 4); \
result = __GCONV_FULL_OUTPUT; \
} \
put32 (outptr, 0x0BB7); \
outptr += 4; \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
*statep = (0x0BCD << 8); \
result = __GCONV_FULL_OUTPUT; \
} \
if (ch == 0x0BC6) \
{ \
- if (__builtin_expect (outptr + 2 <= outend, 1)) \
+ if (__glibc_likely (outptr + 2 <= outend)) \
{ \
*outptr++ = 0xa6; \
*outptr++ = last; \
} \
if (ch == 0x0BC7) \
{ \
- if (__builtin_expect (outptr + 2 <= outend, 1)) \
+ if (__glibc_likely (outptr + 2 <= outend)) \
{ \
*outptr++ = 0xa7; \
*outptr++ = last; \
} \
if (ch == 0x0BC8) \
{ \
- if (__builtin_expect (outptr + 2 <= outend, 1)) \
+ if (__glibc_likely (outptr + 2 <= outend)) \
{ \
*outptr++ = 0xa8; \
*outptr++ = last; \
} \
if (ch == 0x0BCA) \
{ \
- if (__builtin_expect (outptr + 3 <= outend, 1)) \
+ if (__glibc_likely (outptr + 3 <= outend)) \
{ \
*outptr++ = 0xa6; \
*outptr++ = last; \
} \
if (ch == 0x0BCB) \
{ \
- if (__builtin_expect (outptr + 3 <= outend, 1)) \
+ if (__glibc_likely (outptr + 3 <= outend)) \
{ \
*outptr++ = 0xa7; \
*outptr++ = last; \
} \
if (ch == 0x0BCC) \
{ \
- if (__builtin_expect (outptr + 3 <= outend, 1)) \
+ if (__glibc_likely (outptr + 3 <= outend)) \
{ \
*outptr++ = 0xa7; \
*outptr++ = last; \
} \
\
/* Output the buffered character. */ \
- if (__builtin_expect (last >> 8, 0)) \
+ if (__glibc_unlikely (last >> 8)) \
{ \
- if (__builtin_expect (outptr + 2 <= outend, 1)) \
+ if (__glibc_likely (outptr + 2 <= outend)) \
{ \
*outptr++ = last & 0xff; \
*outptr++ = (last >> 8) & 0xff; \
else if (ch >= 0x0BCA && ch <= 0x0BCC) \
{ \
/* See whether we have room for two bytes. */ \
- if (__builtin_expect (outptr + 2 <= outend, 1)) \
+ if (__glibc_likely (outptr + 2 <= outend)) \
{ \
*outptr++ = (ch == 0x0BCA ? 0xa6 : 0xa7); \
*outptr++ = (ch != 0x0BCC ? 0xa1 : 0xaa); \
is also available. */ \
uint32_t ch2; \
\
- if (__builtin_expect (inptr + 1 >= inend, 0)) \
+ if (__glibc_unlikely (inptr + 1 >= inend)) \
{ \
/* The second character is not available. Store \
the intermediate result. */ \
? (ch - 0x81) * 178 \
: 5696 + (ch - 0xa1) * 84)]; \
\
- if (__builtin_expect (ch == 0, 0)) \
+ if (__glibc_unlikely (ch == 0)) \
{ \
/* This is an illegal character. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
{ \
const char *s = uhc_hangul_from_ucs[ch - 0xac00]; \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
{ \
size_t written = ucs4_to_ksc5601_hanja (ch, outptr, outend - outptr); \
\
- if (__builtin_expect (written == 0, 0)) \
+ if (__glibc_unlikely (written == 0)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
- if (__builtin_expect (written == __UNKNOWN_10646_CHAR, 0)) \
+ if (__glibc_unlikely (written == __UNKNOWN_10646_CHAR)) \
{ \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
UNICODE_TAG_HANDLER (ch, 4); \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
- if (__builtin_expect (written == 0, 0)) \
+ if (__glibc_unlikely (written == 0)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
else if (!data->__internal_use && data->__invocation_counter == 0) \
{ \
/* Emit the Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 2 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 2 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put16u (outbuf, BOM); \
{ \
uint32_t c = get32 (inptr); \
\
- if (__builtin_expect (c >= 0x10000, 0)) \
+ if (__glibc_unlikely (c >= 0x10000)) \
{ \
UNICODE_TAG_HANDLER (c, 4); \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
- else if (__builtin_expect (c >= 0xd800 && c < 0xe000, 0)) \
+ else if (__glibc_unlikely (c >= 0xd800 && c < 0xe000)) \
{ \
/* Surrogate characters in UCS-4 input are not valid. \
We must catch this, because the UCS-2 output might be \
if (swap) \
u1 = bswap_16 (u1); \
\
- if (__builtin_expect (u1 >= 0xd800 && u1 < 0xe000, 0)) \
+ if (__glibc_unlikely (u1 >= 0xd800 && u1 < 0xe000)) \
{ \
/* Surrogate characters in UCS-2 input are not valid. Reject \
them. (Catching this here is not security relevant.) */ \
#define PREPARE_LOOP \
enum direction dir = ((struct utf16_data *) step->__data)->dir; \
enum variant var = ((struct utf16_data *) step->__data)->var; \
- if (__builtin_expect (data->__invocation_counter == 0, 0)) \
+ if (__glibc_unlikely (data->__invocation_counter == 0)) \
{ \
if (var == UTF_16) \
{ \
else if (!FROM_DIRECTION && !data->__internal_use) \
{ \
/* Emit the Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 2 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 2 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put16u (outbuf, BOM); \
{ \
uint32_t c = get32 (inptr); \
\
- if (__builtin_expect (c >= 0xd800 && c < 0xe000, 0)) \
+ if (__glibc_unlikely (c >= 0xd800 && c < 0xe000)) \
{ \
/* Surrogate characters in UCS-4 input are not valid. \
We must catch this. If we let surrogates pass through, \
\
if (swap) \
{ \
- if (__builtin_expect (c >= 0x10000, 0)) \
+ if (__glibc_unlikely (c >= 0x10000)) \
{ \
- if (__builtin_expect (c >= 0x110000, 0)) \
+ if (__glibc_unlikely (c >= 0x110000)) \
{ \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
\
/* Generate a surrogate character. */ \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
} \
else \
{ \
- if (__builtin_expect (c >= 0x10000, 0)) \
+ if (__glibc_unlikely (c >= 0x10000)) \
{ \
- if (__builtin_expect (c >= 0x110000, 0)) \
+ if (__glibc_unlikely (c >= 0x110000)) \
{ \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
\
/* Generate a surrogate character. */ \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
\
/* It's a surrogate character. At least the first word says \
it is. */ \
- if (__builtin_expect (inptr + 4 > inend, 0)) \
+ if (__glibc_unlikely (inptr + 4 > inend)) \
{ \
/* We don't have enough input for another complete input \
character. */ \
{ \
/* It's a surrogate character. At least the first word says \
it is. */ \
- if (__builtin_expect (inptr + 4 > inend, 0)) \
+ if (__glibc_unlikely (inptr + 4 > inend)) \
{ \
/* We don't have enough input for another complete input \
character. */ \
int swap; \
if (FROM_DIRECTION && var == UTF_32) \
{ \
- if (__builtin_expect (data->__invocation_counter == 0, 0)) \
+ if (__glibc_unlikely (data->__invocation_counter == 0)) \
{ \
/* We have to find out which byte order the file is encoded in. */ \
if (inptr + 4 > inend) \
&& data->__invocation_counter == 0) \
{ \
/* Emit the Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 4 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 4 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put32u (outbuf, BOM); \
{ \
uint32_t c = get32 (inptr); \
\
- if (__builtin_expect (c >= 0x110000, 0)) \
+ if (__glibc_unlikely (c >= 0x110000)) \
{ \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
} \
- else if (__builtin_expect (c >= 0xd800 && c < 0xe000, 0)) \
+ else if (__glibc_unlikely (c >= 0xd800 && c < 0xe000)) \
{ \
/* Surrogate characters in UCS-4 input are not valid. \
We must catch this. If we let surrogates pass through, \
if (swap) \
u1 = bswap_32 (u1); \
\
- if (__builtin_expect (u1 >= 0x110000, 0)) \
+ if (__glibc_unlikely (u1 >= 0x110000)) \
{ \
/* This is illegal. */ \
STANDARD_FROM_LOOP_ERR_HANDLER (4); \
put32 (outptr, ch); \
outptr += 4; \
} \
- else if (__builtin_expect (ch == '+', 1)) \
+ else if (__glibc_likely (ch == '+')) \
{ \
- if (__builtin_expect (inptr + 2 > inend, 0)) \
+ if (__glibc_unlikely (inptr + 2 > inend)) \
{ \
/* Not enough input available. */ \
result = __GCONV_INCOMPLETE_INPUT; \
else \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
\
- if (__builtin_expect (outptr + count > outend, 0)) \
+ if (__glibc_unlikely (outptr + count > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
size_t count; \
\
count = ((statep->__count & 0x18) >= 0x10) + isxbase64 (ch) + 1; \
- if (__builtin_expect (outptr + count > outend, 0)) \
+ if (__glibc_unlikely (outptr + count > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
else \
STANDARD_TO_LOOP_ERR_HANDLER (4); \
\
- if (__builtin_expect (outptr + count > outend, 0)) \
+ if (__glibc_unlikely (outptr + count > outend)) \
{ \
result = __GCONV_FULL_OUTPUT; \
break; \
/* Deactivate base64 encoding. */ \
size_t count = ((state & 0x18) >= 0x10) + 1; \
\
- if (__builtin_expect (outbuf + count > outend, 0)) \
+ if (__glibc_unlikely (outbuf + count > outend)) \
/* We don't have enough room in the output buffer. */ \
status = __GCONV_FULL_OUTPUT; \
else \
do \
{ \
__atg11_oldval = *__atg11_memp; \
- if (__builtin_expect (__atg11_oldval <= 0, 0)) \
+ if (__glibc_unlikely (__atg11_oldval <= 0)) \
break; \
} \
while (__builtin_expect \
int dsize = cmsg->cmsg_len - CMSG_LEN (0);
/* The first two bytes of the option are for the extended header. */
- if (__builtin_expect (dsize == 0, 0))
+ if (__glibc_unlikely (dsize == 0))
{
cmsg->cmsg_len += sizeof (struct ip6_ext);
dsize = sizeof (struct ip6_ext);
#else
size_t len = strlen (dirname) + 1;
result = (char *) malloc (len);
- if (__builtin_expect (result != NULL, 1))
+ if (__glibc_likely (result != NULL))
memcpy (result, dirname, len);
#endif
}
- if (__builtin_expect (result != NULL, 1))
+ if (__glibc_likely (result != NULL))
{
if (binding->dirname != _nl_default_dirname)
free (binding->dirname);
#else
size_t len = strlen (codeset) + 1;
result = (char *) malloc (len);
- if (__builtin_expect (result != NULL, 1))
+ if (__glibc_likely (result != NULL))
memcpy (result, codeset, len);
#endif
- if (__builtin_expect (result != NULL, 1))
+ if (__glibc_likely (result != NULL))
{
free (binding->codeset);
struct binding *new_binding =
(struct binding *) malloc (offsetof (struct binding, domainname) + len);
- if (__builtin_expect (new_binding == NULL, 0))
+ if (__glibc_unlikely (new_binding == NULL))
goto failed;
memcpy (new_binding->domainname, domainname, len);
char *result;
#if defined _LIBC || defined HAVE_STRDUP
result = strdup (dirname);
- if (__builtin_expect (result == NULL, 0))
+ if (__glibc_unlikely (result == NULL))
goto failed_dirname;
#else
size_t len = strlen (dirname) + 1;
result = (char *) malloc (len);
- if (__builtin_expect (result == NULL, 0))
+ if (__glibc_unlikely (result == NULL))
goto failed_dirname;
memcpy (result, dirname, len);
#endif
#if defined _LIBC || defined HAVE_STRDUP
result = strdup (codeset);
- if (__builtin_expect (result == NULL, 0))
+ if (__glibc_unlikely (result == NULL))
goto failed_codeset;
#else
size_t len = strlen (codeset) + 1;
result = (char *) malloc (len);
- if (__builtin_expect (result == NULL, 0))
+ if (__glibc_unlikely (result == NULL))
goto failed_codeset;
memcpy (result, codeset, len);
#endif
/* Resource problems are not fatal, instead we return no
translation. */
- if (__builtin_expect (retval == (char *) -1, 0))
+ if (__glibc_unlikely (retval == (char *) -1))
goto no_translation;
if (retval != NULL)
/* Returning -1 means that some resource problem exists
(likely memory) and that the strings could not be
converted. Return the original strings. */
- if (__builtin_expect (retval == (char *) -1, 0))
+ if (__glibc_unlikely (retval == (char *) -1))
goto no_translation;
if (retval != NULL)
realloc (domain->conversions,
(nconversions + 1) * sizeof (struct converted_domain));
- if (__builtin_expect (new_conversions == NULL, 0))
+ if (__glibc_unlikely (new_conversions == NULL))
{
/* Nothing we can do, no more memory. We cannot use the
translation because it might be encoded incorrectly. */
/* Copy the 'encoding' string to permanent storage. */
encoding = strdup (encoding);
- if (__builtin_expect (encoding == NULL, 0))
+ if (__glibc_unlikely (encoding == NULL))
/* Nothing we can do, no more memory. We cannot use the
translation because it might be encoded incorrectly. */
goto unlock_fail;
/* Resource problems are fatal. If we continue onwards we will
only attempt to calloc a new conv_tab and fail later. */
- if (__builtin_expect (nullentry == (char *) -1, 0))
+ if (__glibc_unlikely (nullentry == (char *) -1))
return (char *) -1;
if (nullentry != NULL)
charset = norm_add_slashes (charset, "");
int r = __gconv_open (outcharset, charset, &convd->conv,
GCONV_AVOID_NOCONV);
- if (__builtin_expect (r != __GCONV_OK, 0))
+ if (__glibc_unlikely (r != __GCONV_OK))
{
/* If the output encoding is the same there is
nothing to do. Otherwise do not use the
translation at all. */
- if (__builtin_expect (r != __GCONV_NULCONV, 1))
+ if (__glibc_likely (r != __GCONV_NULCONV))
{
__libc_rwlock_unlock (domain->conversions_lock);
free ((char *) encoding);
handle this case by converting RESULTLEN bytes, including
NULs. */
- if (__builtin_expect (convd->conv_tab == NULL, 0))
+ if (__glibc_unlikely (convd->conv_tab == NULL))
{
__libc_lock_lock (lock);
if (convd->conv_tab == NULL)
__libc_lock_unlock (lock);
}
- if (__builtin_expect (convd->conv_tab == (char **) -1, 0))
+ if (__glibc_unlikely (convd->conv_tab == (char **) -1))
/* Nothing we can do, no more memory. We cannot use the
translation because it might be encoded incorrectly. */
return (char *) -1;
/* Fall through and return -1. */
# endif
}
- if (__builtin_expect (newmem == NULL, 0))
+ if (__glibc_unlikely (newmem == NULL))
{
freemem = NULL;
freemem_size = 0;
data = (struct mo_file_header *) mmap (NULL, size, PROT_READ,
MAP_PRIVATE, fd, 0);
- if (__builtin_expect (data != MAP_FAILED, 1))
+ if (__glibc_likely (data != MAP_FAILED))
{
/* mmap() call was successful. */
close (fd);
/* Get the header entry and look for a plural specification. */
nullentry = _nl_find_msg (domain_file, domainbinding, "", 0, &nullentrylen);
- if (__builtin_expect (nullentry == (char *) -1, 0))
+ if (__glibc_unlikely (nullentry == (char *) -1))
{
__libc_rwlock_fini (domain->conversions_lock);
goto invalid;
*cp++ = '\0';
if (nmap >= maxmap)
- if (__builtin_expect (extend_alias_table (), 0))
+ if (__glibc_unlikely (extend_alias_table ()))
goto out;
alias_len = strlen (alias) + 1;
if (new_pool == NULL)
goto out;
- if (__builtin_expect (string_space != new_pool, 0))
+ if (__glibc_unlikely (string_space != new_pool))
{
size_t i;
function it points to. This is to make sure _IO_cleanup gets called
on exit. We call it from _IO_file_doallocate, since that is likely
to get called by any program that does buffered I/O. */
- if (__builtin_expect (_IO_cleanup_registration_needed != NULL, 0))
+ if (__glibc_unlikely (_IO_cleanup_registration_needed != NULL))
(*_IO_cleanup_registration_needed) ();
#endif
{
int fdesc;
#ifdef _LIBC
- if (__builtin_expect (fp->_flags2 & _IO_FLAGS2_NOTCANCEL, 0))
+ if (__glibc_unlikely (fp->_flags2 & _IO_FLAGS2_NOTCANCEL))
fdesc = open_not_cancel (filename,
posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot);
else
if (fp->_IO_read_ptr < fp->_IO_read_end)
return *(unsigned char *) fp->_IO_read_ptr;
- if (__builtin_expect (mmap_remap_check (fp), 0))
+ if (__glibc_unlikely (mmap_remap_check (fp)))
/* We punted to the regular file functions. */
return _IO_UNDERFLOW (fp);
makes room for subsequent output.
Otherwise, set the read pointers to _IO_read_end (leaving that
alone, so it can continue to correspond to the external position). */
- if (__builtin_expect (_IO_in_backup (f), 0))
+ if (__glibc_unlikely (_IO_in_backup (f)))
{
size_t nbackup = f->_IO_read_end - f->_IO_read_ptr;
_IO_free_backup_area (f);
if (have < n)
{
- if (__builtin_expect (_IO_in_backup (fp), 0))
+ if (__glibc_unlikely (_IO_in_backup (fp)))
{
#ifdef _LIBC
s = __mempcpy (s, read_ptr, have);
if (have < n)
{
/* Check that we are mapping all of the file, in case it grew. */
- if (__builtin_expect (mmap_remap_check (fp), 0))
+ if (__glibc_unlikely (mmap_remap_check (fp)))
/* We punted mmap, so complete with the vanilla code. */
return s - (char *) data + _IO_XSGETN (fp, data, n);
cookie_io_functions_t iof;
fmemopen_cookie_t *c;
- if (__builtin_expect (len == 0, 0))
+ if (__glibc_unlikely (len == 0))
{
einval:
__set_errno (EINVAL);
}
else
{
- if (__builtin_expect ((uintptr_t) len > -(uintptr_t) buf, 0))
+ if (__glibc_unlikely ((uintptr_t) len > -(uintptr_t) buf))
{
free (c);
goto einval;
CHECK_FILE (fp, NULL);
if (n <= 0)
return NULL;
- if (__builtin_expect (n == 1, 0))
+ if (__glibc_unlikely (n == 1))
{
/* Another irregular case: since we have to store a NUL byte and
there is only room for exactly one byte, we don't have to
CHECK_FILE (fp, NULL);
if (n <= 0)
return NULL;
- if (__builtin_expect (n == 1, 0))
+ if (__glibc_unlikely (n == 1))
{
/* Another irregular case: since we have to store a NUL byte and
there is only room for exactly one byte, we don't have to
CHECK_FILE (fp, NULL);
if (n <= 0)
return NULL;
- if (__builtin_expect (n == 1, 0))
+ if (__glibc_unlikely (n == 1))
{
/* Another irregular case: since we have to store a NUL byte and
there is only room for exactly one byte, we don't have to
CHECK_FILE (fp, NULL);
if (n <= 0)
return NULL;
- if (__builtin_expect (n == 1, 0))
+ if (__glibc_unlikely (n == 1))
{
/* Another irregular case: since we have to store a NUL byte and
there is only room for exactly one byte, we don't have to
t = (char *) memchr ((void *) fp->_IO_read_ptr, delimiter, len);
if (t != NULL)
len = (t - fp->_IO_read_ptr) + 1;
- if (__builtin_expect (len >= SSIZE_MAX - cur_len, 0))
+ if (__glibc_unlikely (len >= SSIZE_MAX - cur_len))
{
__set_errno (EOVERFLOW);
result = -1;
enum __codecvt_result status;
_IO_ssize_t count;
- if (__builtin_expect (fp->_flags & _IO_NO_READS, 0))
+ if (__glibc_unlikely (fp->_flags & _IO_NO_READS))
{
fp->_flags |= _IO_ERR_SEEN;
__set_errno (EBADF);
const char *from = fp->_IO_read_ptr;
const char *to = fp->_IO_read_end;
size_t to_copy = count;
- if (__builtin_expect (naccbuf != 0, 0))
+ if (__glibc_unlikely (naccbuf != 0))
{
to_copy = MIN (sizeof (accbuf) - naccbuf, count);
to = __mempcpy (&accbuf[naccbuf], from, to_copy);
fp->_wide_data->_IO_buf_end,
&fp->_wide_data->_IO_read_end);
- if (__builtin_expect (naccbuf != 0, 0))
+ if (__glibc_unlikely (naccbuf != 0))
fp->_IO_read_ptr += MAX (0, read_ptr_copy - &accbuf[naccbuf - to_copy]);
else
fp->_IO_read_ptr = (char *) read_ptr_copy;
struct _IO_codecvt *cd;
const char *read_stop;
- if (__builtin_expect (fp->_flags & _IO_NO_READS, 0))
+ if (__glibc_unlikely (fp->_flags & _IO_NO_READS))
{
fp->_flags |= _IO_ERR_SEEN;
__set_errno (EBADF);
&fp->_wide_data->_IO_read_end);
/* Should we return EILSEQ? */
- if (__builtin_expect (status == __codecvt_error, 0))
+ if (__glibc_unlikely (status == __codecvt_error))
{
fp->_flags |= _IO_ERR_SEEN;
return -1;
/* If there was an error, then return WEOF.
TODO: set buffer state. */
- if (__builtin_expect (status == __codecvt_error, 0))
+ if (__glibc_unlikely (status == __codecvt_error))
return WEOF;
}
while (delta > 0);
/* We really have to load some data. First we try the archive,
but only if there was no LOCPATH environment variable specified. */
- if (__builtin_expect (locale_path == NULL, 1))
+ if (__glibc_likely (locale_path == NULL))
{
struct __locale_data *data
= _nl_load_locale_from_archive (category, name);
- if (__builtin_expect (data != NULL, 1))
+ if (__glibc_likely (data != NULL))
return data;
/* Nothing in the archive. Set the default path to search below. */
}
/* If there is no archive or it cannot be loaded for some reason fail. */
- if (__builtin_expect (headmap.ptr == NULL, 0))
+ if (__glibc_unlikely (headmap.ptr == NULL))
goto close_and_out;
/* We have the archive available. To find the name we first have to
Now we need the expected data structures to point into the data. */
lia = malloc (sizeof *lia);
- if (__builtin_expect (lia == NULL, 0))
+ if (__glibc_unlikely (lia == NULL))
return NULL;
lia->name = strdup (*namep);
- if (__builtin_expect (lia->name == NULL, 0))
+ if (__glibc_unlikely (lia->name == NULL))
{
free (lia);
return NULL;
lia->data[cnt] = _nl_intern_locale_data (cnt,
results[cnt].addr,
results[cnt].len);
- if (__builtin_expect (lia->data[cnt] != NULL, 1))
+ if (__glibc_likely (lia->data[cnt] != NULL))
{
/* _nl_intern_locale_data leaves us these fields to initialize. */
lia->data[cnt]->alloc = ld_archive;
for (cnt = 0; cnt < newdata->nstrings; ++cnt)
{
size_t idx = filedata->strindex[cnt];
- if (__builtin_expect (idx > (size_t) newdata->filesize, 0))
+ if (__glibc_unlikely (idx > (size_t) newdata->filesize))
{
puntdata:
free (newdata);
close_not_cancel_no_status (fd);
return;
}
- if (__builtin_expect (S_ISDIR (st.st_mode), 0))
+ if (__glibc_unlikely (S_ISDIR (st.st_mode)))
{
/* LOCALE/LC_foo is a directory; open LOCALE/LC_foo/SYS_LC_foo
instead. */
# endif
filedata = __mmap ((caddr_t) 0, st.st_size,
PROT_READ, MAP_FILE|MAP_COPY, fd, 0);
- if (__builtin_expect (filedata == MAP_FAILED, 0))
+ if (__glibc_unlikely (filedata == MAP_FAILED))
{
filedata = NULL;
if (__builtin_expect (errno, ENOSYS) == ENOSYS)
/* We have mapped the data, so we no longer need the descriptor. */
close_not_cancel_no_status (fd);
- if (__builtin_expect (filedata == NULL, 0))
+ if (__glibc_unlikely (filedata == NULL))
/* We failed to map or read the data. */
return;
newdata = _nl_intern_locale_data (category, filedata, st.st_size);
- if (__builtin_expect (newdata == NULL, 0))
+ if (__glibc_unlikely (newdata == NULL))
/* Bad data. */
{
#ifdef _POSIX_MAPPED_FILES
if (category != LC_ALL)
newnames[category] = (char *) locale;
- if (__builtin_expect (strchr (locale, ';') != NULL, 0))
+ if (__glibc_unlikely (strchr (locale, ';') != NULL))
{
/* This is a composite name. Make a copy and split it up. */
char *np = strdupa (locale);
cap_free (caps);
- if (__builtin_expect (res != 0, 0))
+ if (__glibc_unlikely (res != 0))
return FAIL_EXEC;
}
#endif
tsd_setspecific (arena_key, (void *) &main_arena);
thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
const char *s = NULL;
- if (__builtin_expect (_environ != NULL, 1))
+ if (__glibc_likely (_environ != NULL))
{
char **runp = _environ;
char *envline;
/* Try to re-map the extra heap space freshly to save memory, and make it
inaccessible. See malloc-sysdep.h to know when this is true. */
- if (__builtin_expect (check_may_shrink_heap (), 0))
+ if (__glibc_unlikely (check_may_shrink_heap ()))
{
if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
MAP_FIXED) == (char *) MAP_FAILED)
narenas_limit is 0. There is no possibility for narenas to
be too big for the test to always fail since there is not
enough address space to create that many arenas. */
- if (__builtin_expect (n <= narenas_limit - 1, 0))
+ if (__glibc_unlikely (n <= narenas_limit - 1))
{
if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
goto repeat;
a = _int_new_arena (size);
- if (__builtin_expect (a == NULL, 0))
+ if (__glibc_unlikely (a == NULL))
catomic_decrement (&narenas);
}
else
else
{
bck = victim->bk;
- if (__builtin_expect (bck->fd != victim, 0))
+ if (__glibc_unlikely (bck->fd != victim))
{
errstr = "malloc(): smallbin double linked list corrupted";
goto errout;
have to perform a complete insert here. */
bck = unsorted_chunks (av);
fwd = bck->fd;
- if (__builtin_expect (fwd->bk != bck, 0))
+ if (__glibc_unlikely (fwd->bk != bck))
{
errstr = "malloc(): corrupted unsorted chunks";
goto errout;
have to perform a complete insert here. */
bck = unsorted_chunks (av);
fwd = bck->fd;
- if (__builtin_expect (fwd->bk != bck, 0))
+ if (__glibc_unlikely (fwd->bk != bck))
{
errstr = "malloc(): corrupted unsorted chunks 2";
goto errout;
}
/* We know that each chunk is at least MINSIZE bytes in size or a
multiple of MALLOC_ALIGNMENT. */
- if (__builtin_expect (size < MINSIZE || !aligned_OK (size), 0))
+ if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
{
errstr = "free(): invalid size";
goto errout;
/* Lightweight tests: check whether the block is already the
top block. */
- if (__builtin_expect (p == av->top, 0))
+ if (__glibc_unlikely (p == av->top))
{
errstr = "double free or corruption (top)";
goto errout;
goto errout;
}
/* Or whether the block is actually not marked used. */
- if (__builtin_expect (!prev_inuse(nextchunk), 0))
+ if (__glibc_unlikely (!prev_inuse(nextchunk)))
{
errstr = "double free or corruption (!prev)";
goto errout;
bck = unsorted_chunks(av);
fwd = bck->fd;
- if (__builtin_expect (fwd->bk != bck, 0))
+ if (__glibc_unlikely (fwd->bk != bck))
{
errstr = "free(): corrupted unsorted chunks";
goto errout;
value. The base stack pointer might not be set if this is not
the main thread and it is the first call to any of these
functions. */
- if (__builtin_expect (!start_sp, 0))
+ if (__glibc_unlikely (!start_sp))
start_sp = GETSP ();
uintptr_t sp = GETSP ();
#ifdef STACK_GROWS_UPWARD
/* This can happen in threads where we didn't catch the thread's
stack early enough. */
- if (__builtin_expect (sp < start_sp, 0))
+ if (__glibc_unlikely (sp < start_sp))
start_sp = sp;
size_t current_stack = sp - start_sp;
#else
/* This can happen in threads where we didn't catch the thread's
stack early enough. */
- if (__builtin_expect (sp > start_sp, 0))
+ if (__glibc_unlikely (sp > start_sp))
start_sp = sp;
size_t current_stack = start_sp - sp;
#endif
struct header *result = NULL;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return NULL;
size_t old_len;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return NULL;
size_t size = n * len;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return NULL;
struct header *real;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return;
void *result = NULL;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return NULL;
void *result = NULL;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return NULL;
va_end (ap);
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return NULL;
int result;
/* Determine real implementation if not already happened. */
- if (__builtin_expect (initialized <= 0, 0))
+ if (__glibc_unlikely (initialized <= 0))
{
if (initialized == -1)
return -1;
/* Keep track of number of calls. */
catomic_increment (&calls[idx_munmap]);
- if (__builtin_expect (result == 0, 1))
+ if (__glibc_likely (result == 0))
{
/* Keep track of total memory freed using `free'. */
catomic_add (&total[idx_munmap], len);
long double
__ieee754_exp2l (long double x)
{
- if (__builtin_expect (isless (x, (long double) LDBL_MAX_EXP), 1))
+ if (__glibc_likely (isless (x, (long double) LDBL_MAX_EXP)))
{
if (__builtin_expect (isgreaterequal (x, (long double) (LDBL_MIN_EXP
- LDBL_MANT_DIG
double
__ieee754_scalb (double x, double fn)
{
- if (__builtin_expect (__isnan (x), 0))
+ if (__glibc_unlikely (__isnan (x)))
return x * fn;
- if (__builtin_expect (!__finite (fn), 0))
+ if (__glibc_unlikely (!__finite (fn)))
{
if (__isnan (fn) || fn > 0.0)
return x * fn;
return x;
return x / -fn;
}
- if (__builtin_expect ((double) (int) fn != fn, 0))
+ if (__glibc_unlikely ((double) (int) fn != fn))
return invalid_fn (x, fn);
return __scalbn (x, (int) fn);
float
__ieee754_scalbf (float x, float fn)
{
- if (__builtin_expect (__isnanf (x), 0))
+ if (__glibc_unlikely (__isnanf (x)))
return x * fn;
- if (__builtin_expect (!__finitef (fn), 0))
+ if (__glibc_unlikely (!__finitef (fn)))
{
if (__isnanf (fn) || fn > 0.0f)
return x * fn;
return x;
return x / -fn;
}
- if (__builtin_expect ((float) (int) fn != fn, 0))
+ if (__glibc_unlikely ((float) (int) fn != fn))
return invalid_fn (x, fn);
return __scalbnf (x, (int) fn);
long double
__ieee754_scalbl (long double x, long double fn)
{
- if (__builtin_expect (__isnanl (x), 0))
+ if (__glibc_unlikely (__isnanl (x)))
return x * fn;
- if (__builtin_expect (!__finitel (fn), 0))
+ if (__glibc_unlikely (!__finitel (fn)))
{
if (__isnanl (fn) || fn > 0.0L)
return x * fn;
return x;
return x / -fn;
}
- if (__builtin_expect ((long double) (int) fn != fn, 0))
+ if (__glibc_unlikely ((long double) (int) fn != fn))
return invalid_fn (x, fn);
return __scalbnl (x, (int) fn);
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (rcls == FP_INFINITE)
{
__imag__ res = __nan ("");
}
}
- else if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
res = x;
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (rcls == FP_INFINITE)
{
__imag__ res = __nanf ("");
}
}
- else if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
res = x;
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (icls == FP_INFINITE)
{
__imag__ res = __nan ("");
}
}
- else if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
res = x;
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (icls == FP_INFINITE)
{
__imag__ res = __nanf ("");
}
}
- else if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
res = x;
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (icls == FP_INFINITE)
{
__imag__ res = __nanl ("");
}
}
- else if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
res = x;
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (rcls == FP_INFINITE)
{
__imag__ res = __nanl ("");
}
}
- else if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
res = x;
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((DBL_MAX_EXP - 1) * M_LN2);
double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
else if (rcls == FP_INFINITE)
{
/* Real part is infinite. */
- if (__builtin_expect (icls > FP_ZERO, 1))
+ if (__glibc_likely (icls > FP_ZERO))
{
/* Imaginary part is finite. */
double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((FLT_MAX_EXP - 1) * M_LN2);
float sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
feraiseexcept (FE_INVALID);
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls > FP_ZERO, 1))
+ if (__glibc_likely (icls > FP_ZERO))
{
/* Imaginary part is finite. */
float sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
long double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
feraiseexcept (FE_INVALID);
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls > FP_ZERO, 1))
+ if (__glibc_likely (icls > FP_ZERO))
{
/* Imaginary part is finite. */
long double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((DBL_MAX_EXP - 1) * M_LN2);
double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
feraiseexcept (FE_INVALID);
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
double value = signbit (__real__ x) ? 0.0 : HUGE_VAL;
{
double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((FLT_MAX_EXP - 1) * M_LN2);
float sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
feraiseexcept (FE_INVALID);
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
float value = signbit (__real__ x) ? 0.0 : HUGE_VALF;
{
float sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
long double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
feraiseexcept (FE_INVALID);
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
long double value = signbit (__real__ x) ? 0.0 : HUGE_VALL;
{
long double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
/* Real and imaginary part are 0.0. */
__imag__ result = signbit (__real__ x) ? M_PI : 0.0;
/* Yes, the following line raises an exception. */
__real__ result = -1.0 / fabs (__real__ x);
}
- else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
+ else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
{
/* Neither real nor imaginary part is NaN. */
double absx = fabs (__real__ x), absy = fabs (__imag__ x);
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
/* Real and imaginary part are 0.0. */
__imag__ result = signbit (__real__ x) ? M_PI : 0.0;
/* Yes, the following line raises an exception. */
__real__ result = -1.0 / fabs (__real__ x);
}
- else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
+ else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
{
/* Neither real nor imaginary part is NaN. */
double absx = fabs (__real__ x), absy = fabs (__imag__ x);
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
/* Real and imaginary part are 0.0. */
__imag__ result = signbit (__real__ x) ? M_PI : 0.0;
/* Yes, the following line raises an exception. */
__real__ result = -1.0 / fabsf (__real__ x);
}
- else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
+ else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
{
/* Neither real nor imaginary part is NaN. */
float absx = fabsf (__real__ x), absy = fabsf (__imag__ x);
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
/* Real and imaginary part are 0.0. */
__imag__ result = signbit (__real__ x) ? M_PIl : 0.0;
/* Yes, the following line raises an exception. */
__real__ result = -1.0 / fabsl (__real__ x);
}
- else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
+ else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
{
/* Neither real nor imaginary part is NaN. */
long double absx = fabsl (__real__ x), absy = fabsl (__imag__ x);
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
/* Real and imaginary part are 0.0. */
__imag__ result = signbit (__real__ x) ? M_PI : 0.0;
/* Yes, the following line raises an exception. */
__real__ result = -1.0 / fabsf (__real__ x);
}
- else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
+ else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
{
/* Neither real nor imaginary part is NaN. */
float absx = fabsf (__real__ x), absy = fabsf (__imag__ x);
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
+ if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
{
/* Real and imaginary part are 0.0. */
__imag__ result = signbit (__real__ x) ? M_PIl : 0.0;
/* Yes, the following line raises an exception. */
__real__ result = -1.0 / fabsl (__real__ x);
}
- else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
+ else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
{
/* Neither real nor imaginary part is NaN. */
long double absx = fabsl (__real__ x), absy = fabsl (__imag__ x);
__real__ x = fabs (__real__ x);
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
const int t = (int) ((DBL_MAX_EXP - 1) * M_LN2);
double sinix, cosix;
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincos (__real__ x, &sinix, &cosix);
}
/* Real part is finite. */
double sinix, cosix;
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincos (__real__ x, &sinix, &cosix);
}
__real__ x = fabsf (__real__ x);
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
const int t = (int) ((FLT_MAX_EXP - 1) * M_LN2);
float sinix, cosix;
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincosf (__real__ x, &sinix, &cosix);
}
/* Real part is finite. */
float sinix, cosix;
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincosf (__real__ x, &sinix, &cosix);
}
__real__ x = fabs (__real__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((DBL_MAX_EXP - 1) * M_LN2);
double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
else if (rcls == FP_INFINITE)
{
/* Real part is infinite. */
- if (__builtin_expect (icls > FP_ZERO, 1))
+ if (__glibc_likely (icls > FP_ZERO))
{
/* Imaginary part is finite. */
double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
__real__ x = fabsf (__real__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((FLT_MAX_EXP - 1) * M_LN2);
float sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
}
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls > FP_ZERO, 1))
+ if (__glibc_likely (icls > FP_ZERO))
{
/* Imaginary part is finite. */
float sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
__real__ x = fabsl (__real__ x);
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
long double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
}
}
}
- else if (__builtin_expect (rcls == FP_INFINITE, 1))
+ else if (__glibc_likely (rcls == FP_INFINITE))
{
/* Real part is infinite. */
- if (__builtin_expect (icls > FP_ZERO, 1))
+ if (__glibc_likely (icls > FP_ZERO))
{
/* Imaginary part is finite. */
long double sinix, cosix;
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
__real__ x = fabsl (__real__ x);
- if (__builtin_expect (icls >= FP_ZERO, 1))
+ if (__glibc_likely (icls >= FP_ZERO))
{
/* Imaginary part is finite. */
- if (__builtin_expect (rcls >= FP_ZERO, 1))
+ if (__glibc_likely (rcls >= FP_ZERO))
{
/* Real part is finite. */
const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
long double sinix, cosix;
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincosl (__real__ x, &sinix, &cosix);
}
/* Real part is finite. */
long double sinix, cosix;
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincosl (__real__ x, &sinix, &cosix);
}
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (icls == FP_INFINITE)
{
}
else
{
- if (__builtin_expect (icls == FP_ZERO, 0))
+ if (__glibc_unlikely (icls == FP_ZERO))
{
if (__real__ x < 0.0)
{
__imag__ res = __copysign (0.0, __imag__ x);
}
}
- else if (__builtin_expect (rcls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO))
{
double r;
if (fabs (__imag__ x) >= 2.0 * DBL_MIN)
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (icls == FP_INFINITE)
{
}
else
{
- if (__builtin_expect (icls == FP_ZERO, 0))
+ if (__glibc_unlikely (icls == FP_ZERO))
{
if (__real__ x < 0.0)
{
__imag__ res = __copysignf (0.0, __imag__ x);
}
}
- else if (__builtin_expect (rcls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO))
{
float r;
if (fabsf (__imag__ x) >= 2.0f * FLT_MIN)
int rcls = fpclassify (__real__ x);
int icls = fpclassify (__imag__ x);
- if (__builtin_expect (rcls <= FP_INFINITE || icls <= FP_INFINITE, 0))
+ if (__glibc_unlikely (rcls <= FP_INFINITE || icls <= FP_INFINITE))
{
if (icls == FP_INFINITE)
{
}
else
{
- if (__builtin_expect (icls == FP_ZERO, 0))
+ if (__glibc_unlikely (icls == FP_ZERO))
{
if (__real__ x < 0.0)
{
__imag__ res = __copysignl (0.0, __imag__ x);
}
}
- else if (__builtin_expect (rcls == FP_ZERO, 0))
+ else if (__glibc_unlikely (rcls == FP_ZERO))
{
long double r;
if (fabsl (__imag__ x) >= 2.0L * LDBL_MIN)
{
__complex__ double res;
- if (__builtin_expect (!isfinite (__real__ x) || !isfinite (__imag__ x), 0))
+ if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
{
if (__isinf_ns (__imag__ x))
{
/* tan(x+iy) = (sin(2x) + i*sinh(2y))/(cos(2x) + cosh(2y))
= (sin(x)*cos(x) + i*sinh(y)*cosh(y)/(cos(x)^2 + sinh(y)^2). */
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincos (__real__ x, &sinrx, &cosrx);
}
{
__complex__ float res;
- if (__builtin_expect (!isfinite (__real__ x) || !isfinite (__imag__ x), 0))
+ if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
{
if (__isinf_nsf (__imag__ x))
{
/* tan(x+iy) = (sin(2x) + i*sinh(2y))/(cos(2x) + cosh(2y))
= (sin(x)*cos(x) + i*sinh(y)*cosh(y)/(cos(x)^2 + sinh(y)^2). */
- if (__builtin_expect (fpclassify(__real__ x) != FP_SUBNORMAL, 1))
+ if (__glibc_likely (fpclassify(__real__ x) != FP_SUBNORMAL))
{
__sincosf (__real__ x, &sinrx, &cosrx);
}
{
__complex__ double res;
- if (__builtin_expect (!isfinite (__real__ x) || !isfinite (__imag__ x), 0))
+ if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
{
if (__isinf_ns (__real__ x))
{
/* tanh(x+iy) = (sinh(2x) + i*sin(2y))/(cosh(2x) + cos(2y))
= (sinh(x)*cosh(x) + i*sin(y)*cos(y))/(sinh(x)^2 + cos(y)^2). */
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincos (__imag__ x, &sinix, &cosix);
}
{
__complex__ float res;
- if (__builtin_expect (!isfinite (__real__ x) || !isfinite (__imag__ x), 0))
+ if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
{
if (__isinf_nsf (__real__ x))
{
/* tanh(x+iy) = (sinh(2x) + i*sin(2y))/(cosh(2x) + cos(2y))
= (sinh(x)*cosh(x) + i*sin(y)*cos(y))/(sinh(x)^2 + cos(y)^2). */
- if (__builtin_expect (fpclassify(__imag__ x) != FP_SUBNORMAL, 1))
+ if (__glibc_likely (fpclassify(__imag__ x) != FP_SUBNORMAL))
{
__sincosf (__imag__ x, &sinix, &cosix);
}
{
__complex__ long double res;
- if (__builtin_expect (!isfinite (__real__ x) || !isfinite (__imag__ x), 0))
+ if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
{
if (__isinf_nsl (__real__ x))
{
/* tanh(x+iy) = (sinh(2x) + i*sin(2y))/(cosh(2x) + cos(2y))
= (sinh(x)*cosh(x) + i*sin(y)*cos(y))/(sinh(x)^2 + cos(y)^2). */
- if (__builtin_expect (icls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (icls != FP_SUBNORMAL))
{
__sincosl (__imag__ x, &sinix, &cosix);
}
{
__complex__ long double res;
- if (__builtin_expect (!isfinite (__real__ x) || !isfinite (__imag__ x), 0))
+ if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
{
if (__isinf_nsl (__imag__ x))
{
/* tan(x+iy) = (sin(2x) + i*sinh(2y))/(cos(2x) + cosh(2y))
= (sin(x)*cos(x) + i*sinh(y)*cosh(y)/(cos(x)^2 + sinh(y)^2). */
- if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
+ if (__glibc_likely (rcls != FP_SUBNORMAL))
{
__sincosl (__real__ x, &sinrx, &cosrx);
}
__pow (double x, double y)
{
double z = __ieee754_pow (x, y);
- if (__builtin_expect (!__finite (z), 0))
+ if (__glibc_unlikely (!__finite (z)))
{
if (_LIB_VERSION != _IEEE_)
{
__powf (float x, float y)
{
float z = __ieee754_powf (x, y);
- if (__builtin_expect (!__finitef (z), 0))
+ if (__glibc_unlikely (!__finitef (z)))
{
if (_LIB_VERSION != _IEEE_)
{
__powl (long double x, long double y)
{
long double z = __ieee754_powl (x, y);
- if (__builtin_expect (!__finitel (z), 0))
+ if (__glibc_unlikely (!__finitel (z)))
{
if (_LIB_VERSION != _IEEE_)
{
{
double z = __ieee754_scalb (x, fn);
- if (__builtin_expect (__isinf (z), 0))
+ if (__glibc_unlikely (__isinf (z)))
{
if (__finite (x))
return __kernel_standard (x, fn, 32); /* scalb overflow */
{
float z = __ieee754_scalbf (x, fn);
- if (__builtin_expect (__isinff (z), 0))
+ if (__glibc_unlikely (__isinff (z)))
{
if (__finitef (x))
return __kernel_standard_f (x, fn, 132); /* scalb overflow */
{
long double z = __ieee754_scalbl (x, fn);
- if (__builtin_expect (__isinfl (z), 0))
+ if (__glibc_unlikely (__isinfl (z)))
{
if (__finitel (x))
return __kernel_standard_l (x, fn, 232); /* scalb overflow */
if (res != len)
break;
- if (__builtin_expect (len >= SIZE_MAX / sizeof (wchar_t) / 2, 0))
+ if (__glibc_unlikely (len >= SIZE_MAX / sizeof (wchar_t) / 2))
{
/* This really should not happen if everything is fine. */
res = (size_t) -1;
{
/* Catch bugs which would be hidden by the TIMESPEC_TO_TIMEVAL
computations. The division by 1000 truncates values. */
- if (__builtin_expect (timeout->tv_nsec < 0, 0))
+ if (__glibc_unlikely (timeout->tv_nsec < 0))
{
__set_errno (EINVAL);
return -1;
dir_binding *bptrp, unsigned int flags)
{
nis_error retcode = __nisfind_server (name, 1, dirp, bptrp, flags);
- if (__builtin_expect (retcode != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (retcode != NIS_SUCCESS))
return retcode;
do
cb = (struct nis_cb *) calloc (1,
sizeof (struct nis_cb) + sizeof (nis_server));
- if (__builtin_expect (cb == NULL, 0))
+ if (__glibc_unlikely (cb == NULL))
goto failed;
cb->serv = (nis_server *) (cb + 1);
cb->serv->name = strdup (nis_local_principal ());
- if (__builtin_expect (cb->serv->name == NULL, 0))
+ if (__glibc_unlikely (cb->serv->name == NULL))
goto failed;
cb->serv->ep.ep_val = (endpoint *) calloc (2, sizeof (endpoint));
- if (__builtin_expect (cb->serv->ep.ep_val == NULL, 0))
+ if (__glibc_unlikely (cb->serv->ep.ep_val == NULL))
goto failed;
cb->serv->ep.ep_len = 1;
cb->serv->ep.ep_val[0].family = strdup ("inet");
- if (__builtin_expect (cb->serv->ep.ep_val[0].family == NULL, 0))
+ if (__glibc_unlikely (cb->serv->ep.ep_val[0].family == NULL))
goto failed;
cb->callback = callback;
cb->userdata = userdata;
}
cb->serv->ep.ep_val[0].proto = strdup ((flags & USE_DGRAM) ? "udp" : "tcp");
- if (__builtin_expect (cb->serv->ep.ep_val[0].proto == NULL, 0))
+ if (__glibc_unlikely (cb->serv->ep.ep_val[0].proto == NULL))
goto failed;
cb->xprt = ((flags & USE_DGRAM)
? svcudp_bufcreate (sock, 100, 8192)
return NIS_BADNAME;
obj = calloc (1, sizeof (nis_object));
- if (__builtin_expect (obj == NULL, 0))
+ if (__glibc_unlikely (obj == NULL))
return NIS_NOMEMORY;
obj->zo_oid.ctime = obj->zo_oid.mtime = time (NULL);
return strcpy (buffer, ".");
}
- if (__builtin_expect (cptr_len >= buflen, 0))
+ if (__glibc_unlikely (cptr_len >= buflen))
{
__set_errno (ERANGE);
return NULL;
pings = malloc (sizeof (struct findserv_req) * pings_max);
xid_seed = (u_int32_t) (time (NULL) ^ getpid ());
- if (__builtin_expect (pings == NULL, 0))
+ if (__glibc_unlikely (pings == NULL))
return -1;
memset (&sin, '\0', sizeof (sin));
pings_max += 10;
new_pings = realloc (pings, sizeof (struct findserv_req) *
pings_max);
- if (__builtin_expect (new_pings == NULL, 0))
+ if (__glibc_unlikely (new_pings == NULL))
{
free (pings);
return -1;
serv =
malloc (sizeof (nis_server *) *
(NIS_RES_OBJECT (res)->DI_data.do_servers.do_servers_len + 1));
- if (__builtin_expect (serv == NULL, 0))
+ if (__glibc_unlikely (serv == NULL))
{
nis_freeresult (res);
return NULL;
server =
&NIS_RES_OBJECT (res)->DI_data.do_servers.do_servers_val[i];
serv[i] = calloc (1, sizeof (nis_server));
- if (__builtin_expect (serv[i] == NULL, 0))
+ if (__glibc_unlikely (serv[i] == NULL))
{
free_all:
while (i-- > 0)
if (server->name != NULL)
{
serv[i]->name = strdup (server->name);
- if (__builtin_expect (serv[i]->name == NULL, 0))
+ if (__glibc_unlikely (serv[i]->name == NULL))
{
++i;
goto free_all;
serv[i]->ep.ep_val =
malloc (server->ep.ep_len * sizeof (endpoint));
- if (__builtin_expect (serv[i]->ep.ep_val == NULL, 0))
+ if (__glibc_unlikely (serv[i]->ep.ep_val == NULL))
{
++i;
goto free_all;
if (server->pkey.n_len > 0)
{
serv[i]->pkey.n_bytes = malloc (server->pkey.n_len);
- if (__builtin_expect (serv[i]->pkey.n_bytes == NULL, 0))
+ if (__glibc_unlikely (serv[i]->pkey.n_bytes == NULL))
{
++i;
goto free_all;
else
{
serv = malloc (sizeof (nis_server *));
- if (__builtin_expect (serv != NULL, 0))
+ if (__glibc_unlikely (serv != NULL))
serv[0] = NULL;
}
req.ns_object.ns_object_val = NULL;
status = __prepare_niscall (req.ns_name, &dir, &bptr, flags);
- if (__builtin_expect (status != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NIS_SUCCESS))
{
NIS_RES_STATUS (res) = status;
goto out;
dir = NULL;
status = __prepare_niscall (req.ns_name, &dir,
&bptr, flags);
- if (__builtin_expect (status != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NIS_SUCCESS))
{
NIS_RES_STATUS (res) = status;
goto out;
while (name[i] != '.' && name[i] != '\0')
i++;
- if (__builtin_expect (i >= buflen, 0))
+ if (__glibc_unlikely (i >= buflen))
{
__set_errno (ERANGE);
return NULL;
int count = 2;
nis_name *getnames = malloc ((count + 1) * sizeof (char *));
- if (__builtin_expect (getnames == NULL, 0))
+ if (__glibc_unlikely (getnames == NULL))
return NULL;
/* Do we have a fully qualified NIS+ name ? If yes, give it back */
count += 5;
nis_name *newp = realloc (getnames,
(count + 1) * sizeof (char *));
- if (__builtin_expect (newp == NULL, 0))
+ if (__glibc_unlikely (newp == NULL))
goto free_null;
getnames = newp;
}
tmp = malloc (strlen (cptr) + local_domain_len + name_len + 2);
- if (__builtin_expect (tmp == NULL, 0))
+ if (__glibc_unlikely (tmp == NULL))
goto free_null;
getnames[pos] = tmp;
char *p;
tmp = malloc (cplen + local_domain_len + name_len + 2);
- if (__builtin_expect (tmp == NULL, 0))
+ if (__glibc_unlikely (tmp == NULL))
goto free_null;
p = __stpcpy (tmp, name);
char *p;
tmp = malloc (cplen + name_len + 3);
- if (__builtin_expect (tmp == NULL, 0))
+ if (__glibc_unlikely (tmp == NULL))
goto free_null;
p = __mempcpy (tmp, name, name_len);
count += 5;
nis_name *newp = realloc (getnames,
(count + 1) * sizeof (char *));
- if (__builtin_expect (newp == NULL, 0))
+ if (__glibc_unlikely (newp == NULL))
goto free_null;
getnames = newp;
}
if (cptr != NULL)
*cptr++ = '\0';
- if (__builtin_expect (val == NULL, 0))
+ if (__glibc_unlikely (val == NULL))
{
nis_free_request (ibreq);
return NULL;
}
while (__nisbind_connect (&bptr) != NIS_SUCCESS)
- if (__builtin_expect (__nisbind_next (&bptr) != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (__nisbind_next (&bptr) != NIS_SUCCESS))
{
NIS_RES_STATUS (res) = NIS_NAMEUNREACHABLE;
goto fail;
(xdrproc_t) _xdr_nis_result,
(caddr_t) res, RPCTIMEOUT);
- if (__builtin_expect (clnt_status != RPC_SUCCESS, 0))
+ if (__glibc_unlikely (clnt_status != RPC_SUCCESS))
NIS_RES_STATUS (res) = NIS_RPCERROR;
else
switch (NIS_RES_STATUS (res))
free (ibreq->ibr_name);
ibreq->ibr_name = NULL;
/* If we hit the link limit, bail. */
- if (__builtin_expect (count_links > NIS_MAXLINKS, 0))
+ if (__glibc_unlikely (count_links > NIS_MAXLINKS))
{
NIS_RES_STATUS (res) = NIS_LINKNAMEERROR;
++done;
/* Try the next domainname if we don't follow a link. */
free (ibreq->ibr_name);
ibreq->ibr_name = NULL;
- if (__builtin_expect (count_links, 0))
+ if (__glibc_unlikely (count_links))
{
NIS_RES_STATUS (res) = NIS_LINKNAMEERROR;
++done;
(caddr_t) ibreq,
(xdrproc_t) _xdr_nis_result,
(caddr_t) res, 0, NULL);
- if (__builtin_expect (status != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NIS_SUCCESS))
NIS_RES_STATUS (res) = status;
nis_free_request (ibreq);
(xdrproc_t) _xdr_ib_request,
(caddr_t) ibreq, (xdrproc_t) _xdr_nis_result,
(caddr_t) res, 0, NULL);
- if (__builtin_expect (status != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NIS_SUCCESS))
NIS_RES_STATUS (res) = status;
nis_free_request (ibreq);
(caddr_t) ibreq, (xdrproc_t) _xdr_nis_result,
(caddr_t) res, 0, NULL);
- if (__builtin_expect (status != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NIS_SUCCESS))
NIS_RES_STATUS (res) = status;
nis_free_request (ibreq);
(caddr_t) ibreq, (xdrproc_t) _xdr_nis_result,
(caddr_t) res, 0, NULL);
- if (__builtin_expect (status != NIS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NIS_SUCCESS))
NIS_RES_STATUS (res) = status;
if (cookie != NULL)
if (__builtin_expect (res, TRUE))
{
res = xdr_string (xdrs, &objp->family, ~0);
- if (__builtin_expect (res, 1))
+ if (__glibc_likely (res))
res = xdr_string (xdrs, &objp->proto, ~0);
}
return res;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_grent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_grent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_grent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
gid_t *groups = *groupsp;
/* Matches user. Insert this group. */
- if (__builtin_expect (*start == *size, 0))
+ if (__glibc_unlikely (*start == *size))
{
/* Need a bigger buffer. */
gid_t *newgroups;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_grent (p, &grpbuf, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_pwent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_pwent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_pwent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
|| !(parse_res = _nss_files_parse_spent (p, result, data,
buflen, errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
do
{
/* We need at least 3 characters for one line. */
- if (__builtin_expect (buflen < 3, 0))
+ if (__glibc_unlikely (buflen < 3))
{
erange:
*errnop = ERANGE;
!(parse_res = _nss_files_parse_spent (p, result, data, buflen,
errnop)));
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
/* The parser ran out of space. */
goto erange_reset;
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
alias->alias_local = 0;
yperr = yp_next (domain, "mail.aliases", oldkey, oldkeylen, &outkey,
&keylen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
parse_res = _nss_nis_parse_aliasent (outkey, p, alias, buffer,
buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
free (outkey);
*errnop = ERANGE;
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
size_t namlen = strlen (name);
if (!use_alloca)
free (name2);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
alias->alias_local = 0;
int parse_res = _nss_nis_parse_aliasent (name, p, alias, buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char *result;
int yperr = yp_match (domain, "ethers.byname", name, strlen (name), &result,
&len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_etherent (p, eth, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char buf[33];
int len;
int yperr = yp_match (domain, "ethers.byaddr", buf, nlen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_etherent (p, eth, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
{
/* We have to read all the data now. */
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
struct ypall_callback ypcb;
handle_batch_read:
bucket = intern.next;
- if (__builtin_expect (intern.offset >= bucket->size, 0))
+ if (__glibc_unlikely (intern.offset >= bucket->size))
{
if (bucket->next == NULL)
return NSS_STATUS_NOTFOUND;
yperr = yp_next (domain, "group.byname", oldkey, oldkeylen,
&outkey, &keylen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
}
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
if (!batch_read)
free (result);
parse_res = _nss_files_parse_grent (p, grp, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
if (!batch_read)
free (outkey);
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char *result;
int yperr = yp_match (domain, "group.byname", name, strlen (name), &result,
&len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
char *buffer, size_t buflen, int *errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char buf[32];
int len;
int yperr = yp_match (domain, "group.bygid", buf, nlen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_grent (p, grp, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
int af, int flags)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
uintptr_t pad = -(uintptr_t) buffer % __alignof__ (struct parser_data);
buffer += pad;
struct parser_data *data = (void *) buffer;
- if (__builtin_expect (buflen < sizeof *data + 1 + pad, 0))
+ if (__glibc_unlikely (buflen < sizeof *data + 1 + pad))
{
*errnop = ERANGE;
*h_errnop = NETDB_INTERNAL;
yperr = yp_next (domain, "hosts.byname", oldkey, oldkeylen, &outkey,
&keylen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > linebuflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > linebuflen))
{
free (result);
*h_errnop = NETDB_INTERNAL;
free (result);
parse_res = parse_line (p, host, data, buflen, errnop, af, flags);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
free (outkey);
*h_errnop = NETDB_INTERNAL;
int len;
int yperr = yp_match (domain, "hosts.byname", name2, namlen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
}
const size_t linebuflen = buffer + buflen - data->linebuffer;
- if (__builtin_expect ((size_t) (len + 1) > linebuflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > linebuflen))
{
free (result);
*h_errnop = NETDB_INTERNAL;
int parse_res = parse_line (p, host, data, buflen, errnop, af, flags);
- if (__builtin_expect (parse_res < 1 || host->h_addrtype != af, 0))
+ if (__glibc_unlikely (parse_res < 1 || host->h_addrtype != af))
{
if (parse_res == -1)
{
int *errnop, int *h_errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
uintptr_t pad = -(uintptr_t) buffer % __alignof__ (struct parser_data);
buffer += pad;
struct parser_data *data = (void *) buffer;
- if (__builtin_expect (buflen < sizeof *data + 1 + pad, 0))
+ if (__glibc_unlikely (buflen < sizeof *data + 1 + pad))
{
*errnop = ERANGE;
*h_errnop = NETDB_INTERNAL;
int yperr = yp_match (domain, "hosts.byaddr", buf, strlen (buf), &result,
&len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
}
const size_t linebuflen = buffer + buflen - data->linebuffer;
- if (__builtin_expect ((size_t) (len + 1) > linebuflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > linebuflen))
{
free (result);
*errnop = ERANGE;
int parse_res = parse_line (p, host, data, buflen, errnop, af,
((_res.options & RES_USE_INET6)
? AI_V4MAPPED : 0));
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
int len;
int yperr = yp_match (domain, "hosts.byname", name2, namlen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
buffer += pad;
buflen = buflen > pad ? buflen - pad : 0;
- if (__builtin_expect (buflen < sizeof (struct gaih_addrtuple), 0))
+ if (__glibc_unlikely (buflen < sizeof (struct gaih_addrtuple)))
{
erange:
free (result);
struct parser_data *data = (void *) buffer;
- if (__builtin_expect (buflen < sizeof *data + 1 + pad, 0))
+ if (__glibc_unlikely (buflen < sizeof *data + 1 + pad))
goto erange;
buflen -= pad;
struct hostent host;
int parse_res = parse_line (result, &host, data, buflen, errnop, AF_UNSPEC,
0);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
{
struct response_t *bucket = intern->next;
- if (__builtin_expect (intern->offset >= bucket->size, 0))
+ if (__glibc_unlikely (intern->offset >= bucket->size))
{
if (bucket->next == NULL)
return NSS_STATUS_NOTFOUND;
++intern->offset;
size_t len = strlen (p) + 1;
- if (__builtin_expect (len > buflen, 0))
+ if (__glibc_unlikely (len > buflen))
{
*errnop = ERANGE;
return NSS_STATUS_TRYAGAIN;
parse_res = _nss_files_parse_grent (p, grp, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
return NSS_STATUS_TRYAGAIN;
intern->offset += len;
int reslen;
int yperr = yp_match (domainname, "netid.byname", key, keylen, &result,
&reslen);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
return yperr2nss (yperr);
/* Parse the result: following the colon is a comma separated list of
status = NSS_STATUS_SUCCESS;
- if (__builtin_expect (group == NULL || group[0] == '\0', 0))
+ if (__glibc_unlikely (group == NULL || group[0] == '\0'))
return NSS_STATUS_UNAVAIL;
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
status = yperr2nss (yp_match (domain, "netgroup", group, strlen (group),
&netgrp->data, &len));
- if (__builtin_expect (status == NSS_STATUS_SUCCESS, 1))
+ if (__glibc_likely (status == NSS_STATUS_SUCCESS))
{
/* Our implementation of yp_match already allocates a buffer
which is one byte larger than the value in LEN specifies
struct parser_data *data = (void *) buffer;
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
/* Get the next entry until we found a correct one. */
yperr = yp_next (domain, "networks.byname", oldkey, oldkeylen, &outkey,
&keylen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
free (result);
parse_res = _nss_files_parse_netent (p, net, data, buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
free (outkey);
*herrnop = NETDB_INTERNAL;
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
struct parser_data *data = (void *) buffer;
int yperr = yp_match (domain, "networks.byname", name2, namlen, &result,
&len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_netent (p, net, data, buflen, errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
*herrnop = NETDB_INTERNAL;
if (parse_res == -1)
int *herrnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
struct in_addr in = { .s_addr = htonl (addr) };
int yperr = yp_match (domain, "networks.byaddr", buf, blen, &result,
&len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
}
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_netent (p, net, (void *) buffer,
buflen, errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
*herrnop = NETDB_INTERNAL;
if (parse_res == -1)
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char *result;
int yperr = yp_match (domain, "protocols.byname", name, strlen (name),
&result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_protoent (p, proto, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
char *buffer, size_t buflen, int *errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char buf[32];
int yperr = yp_match (domain, "protocols.bynumber", buf, nlen, &result,
&len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_protoent (p, proto, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
int yperr = yp_match (domain, "publickey.byname", netname, strlen (netname),
&result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
int yperr = yp_match (domain, "publickey.byname", netname, strlen (netname),
&result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
{
struct response_t *bucket = intern->next;
- if (__builtin_expect (bucket == NULL, 0))
+ if (__glibc_unlikely (bucket == NULL))
{
#define MINSIZE 4096 - 4 * sizeof (void *)
const size_t minsize = MAX (MINSIZE, 2 * (invallen + 1));
}
char *p = mempcpy (&bucket->mem[intern->offset], inval, invallen);
- if (__builtin_expect (p[-1] != '\0', 0))
+ if (__glibc_unlikely (p[-1] != '\0'))
{
*p = '\0';
++invallen;
{
/* We have to read all the data now. */
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
struct ypall_callback ypcb;
handle_batch_read:
bucket = intern.next;
- if (__builtin_expect (intern.offset >= bucket->size, 0))
+ if (__glibc_unlikely (intern.offset >= bucket->size))
{
if (bucket->next == NULL)
return NSS_STATUS_NOTFOUND;
yperr = yp_next (domain, "passwd.byname", oldkey, oldkeylen,
&outkey, &keylen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
else
{
non_adjunct:
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
parse_res = _nss_files_parse_pwent (p, pwd, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
if (!batch_read)
free (outkey);
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
size_t namelen = strlen (name);
int len;
int yperr = yp_match (domain, "passwd.byname", name, namelen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
else
{
non_adjunct:
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_pwent (p, pwd, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
char *buffer, size_t buflen, int *errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char buf[32];
int len;
int yperr = yp_match (domain, "passwd.byuid", buf, nlen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
else
{
non_adjunct:
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_pwent (p, pwd, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
{
struct response_t *bucket = intern->next;
- if (__builtin_expect (intern->offset >= bucket->size, 0))
+ if (__glibc_unlikely (intern->offset >= bucket->size))
{
if (bucket->next == NULL)
return NSS_STATUS_NOTFOUND;
++intern->offset;
size_t len = strlen (p) + 1;
- if (__builtin_expect (len > buflen, 0))
+ if (__glibc_unlikely (len > buflen))
{
*errnop = ERANGE;
return NSS_STATUS_TRYAGAIN;
p = memcpy (buffer, &bucket->mem[intern->offset], len);
parse_res = _nss_files_parse_rpcent (p, rpc, pdata, buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
return NSS_STATUS_TRYAGAIN;
intern->offset += len;
intern_t data = { NULL, NULL, 0 };
enum nss_status status = internal_nis_setrpcent (&data);
- if (__builtin_expect (status != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (status != NSS_STATUS_SUCCESS))
return status;
int found = 0;
internal_nis_endrpcent (&data);
- if (__builtin_expect (!found && status == NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (!found && status == NSS_STATUS_SUCCESS))
return NSS_STATUS_NOTFOUND;
return status;
char *buffer, size_t buflen, int *errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
char buf[32];
int len;
int yperr = yp_match (domain, "rpc.bynumber", buf, nlen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_rpcent (p, rpc, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
{
struct search_t *req = (struct search_t *) indata;
- if (__builtin_expect (instatus != YP_TRUE, 0))
+ if (__glibc_unlikely (instatus != YP_TRUE))
return 1;
if (inkey && inkeylen > 0 && inval && invallen > 0)
{
- if (__builtin_expect ((size_t) (invallen + 1) > req->buflen, 0))
+ if (__glibc_unlikely ((size_t) (invallen + 1) > req->buflen))
{
*req->errnop = ERANGE;
req->status = NSS_STATUS_TRYAGAIN;
{
struct response_t *bucket = intern.next;
- if (__builtin_expect (intern.offset >= bucket->size, 0))
+ if (__glibc_unlikely (intern.offset >= bucket->size))
{
if (bucket->next == NULL)
return NSS_STATUS_NOTFOUND;
++intern.offset;
size_t len = strlen (p) + 1;
- if (__builtin_expect (len > buflen, 0))
+ if (__glibc_unlikely (len > buflen))
{
*errnop = ERANGE;
return NSS_STATUS_TRYAGAIN;
p = memcpy (buffer, &bucket->mem[intern.offset], len);
parse_res = _nss_files_parse_servent (p, serv, pdata, buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
return NSS_STATUS_TRYAGAIN;
intern.offset += len;
}
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
/* If the protocol is given, we could try if our NIS server knows
/* If we found the key, it's ok and parse the result. If not,
fall through and parse the complete table. */
- if (__builtin_expect (status == YPERR_SUCCESS, 1))
+ if (__glibc_likely (status == YPERR_SUCCESS))
{
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_servent (p, serv, (void *) buffer,
buflen, errnop);
- if (__builtin_expect (parse_res < 0, 0))
+ if (__glibc_unlikely (parse_res < 0))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
req.status = NSS_STATUS_NOTFOUND;
status = yp_all (domain, "services.byname", &ypcb);
- if (__builtin_expect (status != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (status != YPERR_SUCCESS))
return yperr2nss (status);
return req.status;
size_t buflen, int *errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
/* If the protocol is given, we only need one query.
/* If we found the key, it's ok and parse the result. If not,
fall through and parse the complete table. */
- if (__builtin_expect (status == YPERR_SUCCESS, 1))
+ if (__glibc_likely (status == YPERR_SUCCESS))
{
- if (__builtin_expect ((size_t) (len + 1) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + 1) > buflen))
{
free (result);
*errnop = ERANGE;
free (result);
int parse_res = _nss_files_parse_servent (p, serv, (void *) buffer,
buflen, errnop);
- if (__builtin_expect (parse_res < 0, 0))
+ if (__glibc_unlikely (parse_res < 0))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
req.status = NSS_STATUS_NOTFOUND;
int status = yp_all (domain, "services.byname", &ypcb);
- if (__builtin_expect (status != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (status != YPERR_SUCCESS))
return yperr2nss (status);
return req.status;
int *errnop)
{
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
/* Get the next entry until we found a correct one. */
? "passwd.adjunct.byname" : "shadow.byname"),
oldkey, oldkeylen, &outkey, &keylen, &result, &len);
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
const size_t name_len = strlen (name);
char *domain;
- if (__builtin_expect (yp_get_default_domain (&domain), 0))
+ if (__glibc_unlikely (yp_get_default_domain (&domain)))
return NSS_STATUS_UNAVAIL;
bool adjunct_used = false;
adjunct_used = true;
}
- if (__builtin_expect (yperr != YPERR_SUCCESS, 0))
+ if (__glibc_unlikely (yperr != YPERR_SUCCESS))
{
enum nss_status retval = yperr2nss (yperr);
return retval;
}
- if (__builtin_expect ((size_t) (len + (adjunct_used ? 3 : 1)) > buflen, 0))
+ if (__glibc_unlikely ((size_t) (len + (adjunct_used ? 3 : 1)) > buflen))
{
free (result);
*errnop = ERANGE;
int parse_res = _nss_files_parse_spent (p, sp, (void *) buffer, buflen,
errnop);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
nis_freeresult (result);
/* We do not need the lookup result anymore. */
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
__set_errno (olderr);
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
nis_freeresult (result);
/* We do not need the lookup result anymore. */
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
__set_errno (olderr);
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
nis_freeresult (result);
/* We do not need the lookup result anymore. */
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
return NSS_STATUS_TRYAGAIN;
parse_res = _nss_nisplus_parse_grent (&result, gr,
buffer, buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
*errnop = ERANGE;
retval = NSS_STATUS_TRYAGAIN;
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
parse_res = _nss_nisplus_parse_grent (result, gr, buffer, buflen, errnop);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
parse_res = _nss_nisplus_parse_grent (result, gr, buffer, buflen, errnop);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
__set_errno (olderr);
}
int retval = niserr2nss (result->status);
- if (__builtin_expect (retval != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (retval != NSS_STATUS_SUCCESS))
{
if (retval == NSS_STATUS_TRYAGAIN)
{
}
retval = niserr2nss (result->status);
- if (__builtin_expect (retval != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (retval != NSS_STATUS_SUCCESS))
{
if (retval == NSS_STATUS_TRYAGAIN)
{
enum nss_status status = internal_gethostbyname2_r (name, AF_UNSPEC, &host,
buffer, buflen,
errnop, herrnop, 0);
- if (__builtin_expect (status == NSS_STATUS_SUCCESS, 1))
+ if (__glibc_likely (status == NSS_STATUS_SUCCESS))
{
if (*pat == NULL)
{
buffer += pad;
buflen = buflen > pad ? buflen - pad : 0;
- if (__builtin_expect (buflen < sizeof (struct gaih_addrtuple), 0))
+ if (__glibc_unlikely (buflen < sizeof (struct gaih_addrtuple)))
{
free (result);
*errnop = ERANGE;
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
gid_t gid;
char *endp;
- if (__builtin_expect (numstr[len - 1] != '\0', 0))
+ if (__glibc_unlikely (numstr[len - 1] != '\0'))
{
char numstrbuf[len + 1];
memcpy (numstrbuf, numstr, len);
}
retval = niserr2nss (result->status);
- if (__builtin_expect (retval != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (retval != NSS_STATUS_SUCCESS))
{
if (retval == NSS_STATUS_TRYAGAIN)
{
return NSS_STATUS_TRYAGAIN;
}
enum nss_status retval = niserr2nss (result->status);
- if (__builtin_expect (retval != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (retval != NSS_STATUS_SUCCESS))
{
if (b2len > 2 && buf2[b2len - 2] == '.' && buf2[b2len - 1] == '0')
{
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
parse_res = _nss_nisplus_parse_pwent (&result, pw, buffer,
buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
*errnop = ERANGE;
retval = NSS_STATUS_TRYAGAIN;
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
parse_res = _nss_nisplus_parse_servent (result, serv, buffer,
buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
nis_freeresult (result);
result = saved_res;
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
errnop);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
errnop);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
parse_res = _nss_nisplus_parse_spent (result, sp, buffer,
buflen, errnop);
- if (__builtin_expect (parse_res == -1, 0))
+ if (__glibc_unlikely (parse_res == -1))
{
nis_freeresult (result);
result = saved_res;
return NSS_STATUS_TRYAGAIN;
}
- if (__builtin_expect (niserr2nss (result->status) != NSS_STATUS_SUCCESS, 0))
+ if (__glibc_unlikely (niserr2nss (result->status) != NSS_STATUS_SUCCESS))
{
enum nss_status status = niserr2nss (result->status);
parse_res = _nss_nisplus_parse_spent (result, sp, buffer, buflen, errnop);
nis_freeresult (result);
- if (__builtin_expect (parse_res < 1, 0))
+ if (__glibc_unlikely (parse_res < 1))
{
if (parse_res == -1)
{
{
is_new = 1;
ysd = (dom_binding *) calloc (1, sizeof *ysd);
- if (__builtin_expect (ysd == NULL, 0))
+ if (__glibc_unlikely (ysd == NULL))
return YPERR_RESRC;
}
*outvallen = resp.val.valdat_len;
*outval = malloc (*outvallen + 1);
int status = YPERR_RESRC;
- if (__builtin_expect (*outval != NULL, 1))
+ if (__glibc_likely (*outval != NULL))
{
memcpy (*outval, resp.val.valdat_val, *outvallen);
(*outval)[*outvallen] = '\0';
(caddr_t) &req, (xdrproc_t) __xdr_ypresp_all,
(caddr_t) &data, RPCTIMEOUT);
- if (__builtin_expect (result != RPC_SUCCESS, 0))
+ if (__glibc_unlikely (result != RPC_SUCCESS))
{
/* Print the error message only on the last try. */
if (try == MAXTRIES - 1)
(caddr_t) &indomain, (xdrproc_t) xdr_ypresp_maplist,
(caddr_t) &resp);
- if (__builtin_expect (result == YPERR_SUCCESS, 1))
+ if (__glibc_likely (result == YPERR_SUCCESS))
{
*outmaplist = resp.maps;
/* We don't free the list, this will be done by ypserv
+2014-02-10 Ondřej Bílka <neleai@seznam.cz>
+
+ * allocatestack.c (queue_stack, allocate_stack,
+ __deallocate_stack, __reclaim_stacks): Use glibc_likely instead
+ of __builtin_expect.
+ * cancellation.c (__pthread_enable_asynccancel,
+ __pthread_disable_asynccancel): Likewise.
+ * cleanup_defer.c (__pthread_register_cancel_defer,
+ __pthread_unregister_cancel_restore): Likewise.
+ * cleanup_defer_compat.c (_pthread_cleanup_push_defer,
+ _pthread_cleanup_pop_restore): Likewise.
+ * cond-perf.c (main): Likewise.
+ * nptl-init.c (sigcancel_handler, sighandler_setxid): Likewise.
+ * perf.c (get_clockfreq): Likewise.
+ * pthread_barrier_destroy.c (pthread_barrier_destroy): Likewise.
+ * pthread_barrier_init.c (pthread_barrier_init): Likewise.
+ * pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
+ * pthread_cond_wait.c (__pthread_cond_wait): Likewise.
+ * pthread_create.c (__free_tcb, start_thread, __pthread_create_2_1):
+ Likewise.
+ * pthread_getattr_np.c (pthread_getattr_np): Likewise.
+ * pthread_getspecific.c (__pthread_getspecific): Likewise.
+ * pthread_join.c (pthread_join): Likewise.
+ * pthread_key_delete.c (pthread_key_delete): Likewise.
+ * pthread_mutex_init.c (__pthread_mutex_init): Likewise.
+ * pthread_mutex_lock.c (__pthread_mutex_lock,
+ __pthread_mutex_lock_full): Likewise.
+ * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Likewise.
+ * pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
+ * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Likewise.
+ * pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Likewise.
+ * pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock): Likewise.
+ * pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock): Likewise.
+ * pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
+ * pthread_setcancelstate.c (__pthread_setcancelstate): Likewise.
+ * pthread_setcanceltype.c (__pthread_setcanceltype): Likewise.
+ * pthread_setschedprio.c (pthread_setschedprio): Likewise.
+ * pthread_setspecific.c (__pthread_setspecific): Likewise.
+ * sem_init.c (__new_sem_init): Likewise.
+ * sem_open.c (__where_is_shmfs): Likewise.
+ * sigaction.c: Likewise.
+ * sockperf.c (get_clockfreq): Likewise.
+ * sysdeps/pthread/createthread.c (do_clone, create_thread): Likewise.
+ * sysdeps/pthread/setxid.h: Likewise.
+ * sysdeps/pthread/timer_create.c (timer_create): Likewise.
+ * sysdeps/pthread/unwind-forcedunwind.c (pthread_cancel_init,
+ __unwind_freeres, _Unwind_Resume, __gcc_personality_v0,
+ _Unwind_ForcedUnwind): Likewise.
+ * sysdeps/unix/sysv/linux/getpid.c (__getpid): Likewise.
+ * sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+ (__lll_robust_lock_wait, __lll_robust_timedlock_wait): Likewise.
+ * sysdeps/unix/sysv/linux/mq_notify.c (mq_notify): Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+ * sysdeps/unix/sysv/linux/pthread_kill.c (__pthread_kill): Likewise.
+ * sysdeps/unix/sysv/linux/pthread_setaffinity.c
+ (__pthread_setaffinity_new): Likewise.
+ * sysdeps/unix/sysv/linux/pthread_sigqueue.c (pthread_sigqueue):
+ Likewise.
+ * sysdeps/unix/sysv/linux/pt-raise.c (raise): Likewise.
+ * sysdeps/unix/sysv/linux/raise.c (raise): Likewise.
+ * sysdeps/unix/sysv/linux/s390/lowlevellock.h (__lll_robust_trylock,
+ __lll_robust_lock, __lll_cond_lock, __lll_robust_timedlock): Likewise.
+ * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (__lll_lock,
+ __lll_cond_lock, __lll_timedlock, __lll_robust_timedlock): Likewise.
+ * sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
+ (pthread_barrier_destroy): Likewise.
+ * sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
+ (pthread_barrier_init): Likewise.
+ * sysdeps/unix/sysv/linux/sparc/sem_init.c (__new_sem_init): Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/timer_create.c (__timer_create_old):
+ Likewise.
+ * unwind.c (unwind_stop): Likewise.
+
2014-02-08 Mike Frysinger <vapier@gentoo.org>
* sem_open.c (__where_is_shmfs): Compare f.f_type to RAMFS_MAGIC too.
stack_list_add (&stack->list, &stack_cache);
stack_cache_actsize += stack->stackblock_size;
- if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
+ if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
__free_stacks (stack_cache_maxsize);
}
}
/* Get memory for the stack. */
- if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
+ if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
{
uintptr_t adj;
mem = mmap (NULL, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
- if (__builtin_expect (mem == MAP_FAILED, 0))
+ if (__glibc_unlikely (mem == MAP_FAILED))
return errno;
/* SIZE is guaranteed to be greater than zero.
/* Make sure the coloring offsets does not disturb the alignment
of the TCB and static TLS block. */
- if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
+ if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0))
coloring = (((coloring + __static_tls_align_m1)
& ~(__static_tls_align_m1))
& ~pagesize_m1);
}
/* Create or resize the guard area if necessary. */
- if (__builtin_expect (guardsize > pd->guardsize, 0))
+ if (__glibc_unlikely (guardsize > pd->guardsize))
{
#ifdef NEED_SEPARATE_REGISTER_STACK
char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
not reset the 'used' flag in the 'tid' field. This is done by
the kernel. If no thread has been created yet this field is
still zero. */
- if (__builtin_expect (! pd->user_stack, 1))
+ if (__glibc_likely (! pd->user_stack))
(void) queue_stack (pd);
else
/* Free the memory associated with the ELF TLS. */
INIT_LIST_HEAD (&stack_used);
INIT_LIST_HEAD (&__stack_user);
- if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
+ if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
list_add (&self->list, &__stack_user);
else
list_add (&self->list, &stack_used);
int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
oldval);
- if (__builtin_expect (curval == oldval, 1))
+ if (__glibc_likely (curval == oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
oldval);
- if (__builtin_expect (curval == oldval, 1))
+ if (__glibc_likely (curval == oldval))
break;
/* Prepare the next round. */
int cancelhandling = THREAD_GETMEM (self, cancelhandling);
/* Disable asynchronous cancellation for now. */
- if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
+ if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
while (1)
{
int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
cancelhandling
& ~CANCELTYPE_BITMASK,
cancelhandling);
- if (__builtin_expect (curval == cancelhandling, 1))
+ if (__glibc_likely (curval == cancelhandling))
/* Successfully replaced the value. */
break;
cancelhandling
| CANCELTYPE_BITMASK,
cancelhandling);
- if (__builtin_expect (curval == cancelhandling, 1))
+ if (__glibc_likely (curval == cancelhandling))
/* Successfully replaced the value. */
break;
int cancelhandling = THREAD_GETMEM (self, cancelhandling);
/* Disable asynchronous cancellation for now. */
- if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
+ if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
while (1)
{
int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
cancelhandling
& ~CANCELTYPE_BITMASK,
cancelhandling);
- if (__builtin_expect (curval == cancelhandling, 1))
+ if (__glibc_likely (curval == cancelhandling))
/* Successfully replaced the value. */
break;
cancelhandling
| CANCELTYPE_BITMASK,
cancelhandling);
- if (__builtin_expect (curval == cancelhandling, 1))
+ if (__glibc_likely (curval == cancelhandling))
/* Successfully replaced the value. */
break;
pthread_t th[nthreads];
int i;
for (i = 0; __builtin_expect (i < nthreads, 1); ++i)
- if (__builtin_expect ((err = pthread_create (&th[i], NULL, cons, (void *) (long) i)) != 0, 0))
+ if (__glibc_unlikely ((err = pthread_create (&th[i], NULL, cons, (void *) (long) i)) != 0))
printf ("pthread_create: %s\n", strerror (err));
for (i = 0; __builtin_expect (i < nrounds, 1); ++i)
/* Determine the process ID. It might be negative if the thread is
in the middle of a fork() call. */
pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
- if (__builtin_expect (pid < 0, 0))
+ if (__glibc_unlikely (pid < 0))
pid = -pid;
/* Safety check. It would be possible to call this function for
/* Determine the process ID. It might be negative if the thread is
in the middle of a fork() call. */
pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
- if (__builtin_expect (pid < 0, 0))
+ if (__glibc_unlikely (pid < 0))
pid = -pid;
/* Safety check. It would be possible to call this function for
return result;
fd = open ("/proc/cpuinfo", O_RDONLY);
- if (__builtin_expect (fd != -1, 1))
+ if (__glibc_likely (fd != -1))
{
/* XXX AFAIK the /proc filesystem can generate "files" only up
to a size of 4096 bytes. */
{
char *mhz = memmem (buf, n, "cpu MHz", 7);
- if (__builtin_expect (mhz != NULL, 1))
+ if (__glibc_likely (mhz != NULL))
{
char *endp = buf + n;
int seen_decpoint = 0;
lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
- if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
+ if (__glibc_likely (ibarrier->left == ibarrier->init_count))
/* The barrier is not used anymore. */
result = 0;
else
{
struct pthread_barrier *ibarrier;
- if (__builtin_expect (count == 0, 0))
+ if (__glibc_unlikely (count == 0))
return EINVAL;
const struct pthread_barrierattr *iattr
/* Work around the fact that the kernel rejects negative timeout values
despite them being valid. */
- if (__builtin_expect (abstime->tv_sec < 0, 0))
+ if (__glibc_unlikely (abstime->tv_sec < 0))
goto timeout;
/* Remember the mutex we are using here. If there is already a
--rt.tv_sec;
}
/* Did we already time out? */
- if (__builtin_expect (rt.tv_sec < 0, 0))
+ if (__glibc_unlikely (rt.tv_sec < 0))
{
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
goto bc_out;
break;
/* Not woken yet. Maybe the time expired? */
- if (__builtin_expect (err == -ETIMEDOUT, 0))
+ if (__glibc_unlikely (err == -ETIMEDOUT))
{
timeout:
/* Yep. Adjust the counters. */
/* Now we can release the mutex. */
err = __pthread_mutex_unlock_usercnt (mutex, 0);
- if (__builtin_expect (err, 0))
+ if (__glibc_unlikely (err))
{
lll_unlock (cond->__data.__lock, pshared);
return err;
abort ();
/* Free TPP data. */
- if (__builtin_expect (pd->tpp != NULL, 0))
+ if (__glibc_unlikely (pd->tpp != NULL))
{
struct priority_protection_data *tpp = pd->tpp;
__ctype_init ();
/* Allow setxid from now onwards. */
- if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2, 0))
+ if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
#ifdef __NR_set_robust_list
/* If the parent was running cancellation handlers while creating
the thread the new thread inherited the signal mask. Reset the
cancellation signal mask. */
- if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
+ if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
{
INTERNAL_SYSCALL_DECL (err);
sigset_t mask;
int not_first_call;
not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
- if (__builtin_expect (! not_first_call, 1))
+ if (__glibc_likely (! not_first_call))
{
/* Store the new cleanup handler info. */
THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);
- if (__builtin_expect (pd->stopped_start, 0))
+ if (__glibc_unlikely (pd->stopped_start))
{
int oldtype = CANCEL_ASYNC ();
/* If this is the last thread we terminate the process now. We
do not notify the debugger, it might just irritate it if there
is no thread left. */
- if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
+ if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
/* This was the last thread. */
exit (0);
/* Report the death of the thread if this is wanted. */
- if (__builtin_expect (pd->report_events, 0))
+ if (__glibc_unlikely (pd->report_events))
{
/* See whether TD_DEATH is in any of the mask. */
const int idx = __td_eventword (TD_DEATH);
if (IS_DETACHED (pd))
/* Free the TCB. */
__free_tcb (pd);
- else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
+ else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
{
/* Some other thread might call any of the setXid functions and expect
us to reply. In this case wait until we did that. */
int err = ALLOCATE_STACK (iattr, &pd);
int retval = 0;
- if (__builtin_expect (err != 0, 0))
+ if (__glibc_unlikely (err != 0))
/* Something went wrong. Maybe a parameter of the attributes is
invalid or we could not allocate memory. Note we have to
translate error codes. */
iattr->guardsize = thread->reported_guardsize;
/* The sizes are subject to alignment. */
- if (__builtin_expect (thread->stackblock != NULL, 1))
+ if (__glibc_likely (thread->stackblock != NULL))
{
iattr->stacksize = thread->stackblock_size;
iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
/* Special case access to the first 2nd-level block. This is the
usual case. */
- if (__builtin_expect (key < PTHREAD_KEY_2NDLEVEL_SIZE, 1))
+ if (__glibc_likely (key < PTHREAD_KEY_2NDLEVEL_SIZE))
data = &THREAD_SELF->specific_1stblock[key];
else
{
{
uintptr_t seq = data->seq;
- if (__builtin_expect (seq != __pthread_keys[key].seq, 0))
+ if (__glibc_unlikely (seq != __pthread_keys[key].seq))
result = data->data = NULL;
}
pthread_cleanup_pop (0);
- if (__builtin_expect (result == 0, 1))
+ if (__glibc_likely (result == 0))
{
/* We mark the thread as terminated and as joined. */
pd->tid = -1;
{
int result = EINVAL;
- if (__builtin_expect (key < PTHREAD_KEYS_MAX, 1))
+ if (__glibc_likely (key < PTHREAD_KEYS_MAX))
{
unsigned int seq = __pthread_keys[key].seq;
case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
#ifndef __ASSUME_FUTEX_LOCK_PI
- if (__builtin_expect (tpi_supported == 0, 0))
+ if (__glibc_unlikely (tpi_supported == 0))
{
int lock = 0;
INTERNAL_SYSCALL_DECL (err);
assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
}
- if (__builtin_expect (tpi_supported < 0, 0))
+ if (__glibc_unlikely (tpi_supported < 0))
return ENOTSUP;
#endif
break;
| PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
return __pthread_mutex_lock_full (mutex);
- if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_NP, 1))
+ if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
{
FORCE_ELISION (mutex, goto elision);
simple:
assert (mutex->__data.__owner == 0);
}
#ifdef HAVE_ELISION
- else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
+ else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
{
elision: __attribute__((unused))
/* This case can never happen on a system without elision,
if (mutex->__data.__owner == id)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
/* Check whether we already hold the mutex. */
- if (__builtin_expect (mutex->__data.__owner == id, 0))
+ if (__glibc_unlikely (mutex->__data.__owner == id))
return EDEADLK;
goto simple;
}
}
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
int kind = PTHREAD_MUTEX_TYPE (mutex);
if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
oldval = mutex->__data.__lock;
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
}
- if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
if (mutex->__data.__owner == id)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
/* Error checking mutex. */
case PTHREAD_MUTEX_ERRORCHECK_NP:
/* Check whether we already hold the mutex. */
- if (__builtin_expect (mutex->__data.__owner == id, 0))
+ if (__glibc_unlikely (mutex->__data.__owner == id))
return EDEADLK;
/* FALLTHROUGH */
}
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
int kind = PTHREAD_MUTEX_TYPE (mutex);
if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
oldval = mutex->__data.__lock;
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
}
- if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
if (mutex->__data.__owner == id)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
}
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
int kind = PTHREAD_MUTEX_TYPE (mutex);
if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
oldval = mutex->__data.__lock;
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
oldval = mutex->__data.__lock;
}
- if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
return 0;
}
- else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
+ else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
{
/* Don't reset the owner/users fields for elision. */
return lll_unlock_elision (mutex->__data.__lock,
|| PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
{
/* Increment the reader counter. Avoid overflow. */
- if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
+ if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
{
/* Overflow on number of readers. */
--rwlock->__data.__nr_readers;
}
/* Remember that we are a reader. */
- if (__builtin_expect (++rwlock->__data.__nr_readers_queued == 0, 0))
+ if (__glibc_unlikely (++rwlock->__data.__nr_readers_queued == 0))
{
/* Overflow on number of queued readers. */
--rwlock->__data.__nr_readers_queued;
/* Work around the fact that the kernel rejects negative timeout values
despite them being valid. */
- if (__builtin_expect (abstime->tv_sec < 0, 0))
+ if (__glibc_unlikely (abstime->tv_sec < 0))
{
result = ETIMEDOUT;
break;
/* Work around the fact that the kernel rejects negative timeout values
despite them being valid. */
- if (__builtin_expect (abstime->tv_sec < 0, 0))
+ if (__glibc_unlikely (abstime->tv_sec < 0))
{
result = ETIMEDOUT;
break;
&& (rwlock->__data.__nr_writers_queued == 0
|| PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
{
- if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
+ if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
{
--rwlock->__data.__nr_readers;
result = EAGAIN;
atomically since other bits could be modified as well. */
int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
oldval);
- if (__builtin_expect (curval == oldval, 1))
+ if (__glibc_likely (curval == oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
__do_cancel ();
atomically since other bits could be modified as well. */
int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
oldval);
- if (__builtin_expect (curval == oldval, 1))
+ if (__glibc_likely (curval == oldval))
{
if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
{
param.sched_priority = pd->tpp->priomax;
/* Try to set the scheduler information. */
 - if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0))
 + if (__glibc_unlikely (sched_setparam (pd->tid, &param) == -1))
result = errno;
else
{
/* Special case access to the first 2nd-level block. This is the
usual case. */
- if (__builtin_expect (key < PTHREAD_KEY_2NDLEVEL_SIZE, 1))
+ if (__glibc_likely (key < PTHREAD_KEY_2NDLEVEL_SIZE))
{
/* Verify the key is sane. */
if (KEY_UNUSED ((seq = __pthread_keys[key].seq)))
unsigned int value;
{
/* Parameter sanity check. */
- if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+ if (__glibc_unlikely (value > SEM_VALUE_MAX))
{
__set_errno (EINVAL);
return -1;
unsigned int value;
{
/* Parameter sanity check. */
- if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+ if (__glibc_unlikely (value > SEM_VALUE_MAX))
{
__set_errno (EINVAL);
return -1;
/* OK, do it the hard way. Look through the /proc/mounts file and if
this does not exist through /etc/fstab to find the mount point. */
fp = __setmntent ("/proc/mounts", "r");
- if (__builtin_expect (fp == NULL, 0))
+ if (__glibc_unlikely (fp == NULL))
{
fp = __setmntent (_PATH_MNTTAB, "r");
- if (__builtin_expect (fp == NULL, 0))
+ if (__glibc_unlikely (fp == NULL))
/* There is nothing we can do. Blind guesses are not helpful. */
return;
}
const struct sigaction *act;
struct sigaction *oact;
{
- if (__builtin_expect (sig == SIGCANCEL || sig == SIGSETXID, 0))
+ if (__glibc_unlikely (sig == SIGCANCEL || sig == SIGSETXID))
{
__set_errno (EINVAL);
return -1;
return result;
fd = open ("/proc/cpuinfo", O_RDONLY);
- if (__builtin_expect (fd != -1, 1))
+ if (__glibc_likely (fd != -1))
{
/* XXX AFAIK the /proc filesystem can generate "files" only up
to a size of 4096 bytes. */
{
char *mhz = memmem (buf, n, "cpu MHz", 7);
- if (__builtin_expect (mhz != NULL, 1))
+ if (__glibc_likely (mhz != NULL))
{
char *endp = buf + n;
int seen_decpoint = 0;
PREPARE_CREATE;
#endif
- if (__builtin_expect (stopped != 0, 0))
+ if (__glibc_unlikely (stopped != 0))
/* We make sure the thread does not run far by forcing it to get a
lock. We lock it here too so that the new thread cannot continue
until we tell it to. */
int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
pd, &pd->tid, TLS_VALUE, &pd->tid);
- if (__builtin_expect (rc == -1, 0))
+ if (__glibc_unlikely (rc == -1))
{
atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second. */
}
/* Now we have the possibility to set scheduling parameters etc. */
- if (__builtin_expect (stopped != 0, 0))
+ if (__glibc_unlikely (stopped != 0))
{
INTERNAL_SYSCALL_DECL (err);
int res = 0;
res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
attr->cpusetsize, attr->cpuset);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
{
/* The operation failed. We have to kill the thread. First
send it the cancellation signal. */
res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
pd->schedpolicy, &pd->schedparam);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
goto err_out;
}
}
| CLONE_CHILD_CLEARTID | CLONE_SYSVSEM
| 0);
- if (__builtin_expect (THREAD_GETMEM (THREAD_SELF, report_events), 0))
+ if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
{
/* The parent thread is supposed to report events. Check whether
the TD_CREATE event is needed, too. */
({ \
extern __typeof (__nptl_setxid) __nptl_setxid __attribute__((weak));\
int __result; \
- if (__builtin_expect (__nptl_setxid != NULL, 0)) \
+ if (__glibc_unlikely (__nptl_setxid != NULL)) \
{ \
struct xid_command __cmd; \
__cmd.syscall_no = __NR_##name; \
pthread_mutex_lock (&__timer_mutex);
newtimer = __timer_alloc ();
- if (__builtin_expect (newtimer == NULL, 0))
+ if (__glibc_unlikely (newtimer == NULL))
{
__set_errno (EAGAIN);
goto unlock_bail;
thread = __timer_thread_alloc (&newtimer->attr, clock_id);
/* Out of luck; no threads are available. */
- if (__builtin_expect (thread == NULL, 0))
+ if (__glibc_unlikely (thread == NULL))
{
__set_errno (EAGAIN);
goto unlock_bail;
void *getcfa;
void *handle;
- if (__builtin_expect (libgcc_s_handle != NULL, 1))
+ if (__glibc_likely (libgcc_s_handle != NULL))
{
/* Force gcc to reload all values. */
asm volatile ("" ::: "memory");
void
_Unwind_Resume (struct _Unwind_Exception *exc)
{
- if (__builtin_expect (libgcc_s_handle == NULL, 0))
+ if (__glibc_unlikely (libgcc_s_handle == NULL))
pthread_cancel_init ();
else
atomic_read_barrier ();
struct _Unwind_Exception *ue_header,
struct _Unwind_Context *context)
{
- if (__builtin_expect (libgcc_s_handle == NULL, 0))
+ if (__glibc_unlikely (libgcc_s_handle == NULL))
pthread_cancel_init ();
else
atomic_read_barrier ();
_Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
void *stop_argument)
{
- if (__builtin_expect (libgcc_s_handle == NULL, 0))
+ if (__glibc_unlikely (libgcc_s_handle == NULL))
pthread_cancel_init ();
else
atomic_read_barrier ();
_Unwind_Word
_Unwind_GetCFA (struct _Unwind_Context *context)
{
- if (__builtin_expect (libgcc_s_handle == NULL, 0))
+ if (__glibc_unlikely (libgcc_s_handle == NULL))
pthread_cancel_init ();
else
atomic_read_barrier ();
static inline __attribute__((always_inline)) pid_t
really_getpid (pid_t oldval)
{
- if (__builtin_expect (oldval == 0, 1))
+ if (__glibc_likely (oldval == 0))
{
pid_t selftid = THREAD_GETMEM (THREAD_SELF, tid);
- if (__builtin_expect (selftid != 0, 1))
+ if (__glibc_likely (selftid != 0))
return selftid;
}
pid_t result = INTERNAL_SYSCALL (getpid, err, 0);
#else
pid_t result = THREAD_GETMEM (THREAD_SELF, pid);
- if (__builtin_expect (result <= 0, 0))
+ if (__glibc_unlikely (result <= 0))
result = really_getpid (result);
#endif
return result;
do
{
- if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
return oldval;
int newval = oldval | FUTEX_WAITERS;
/* Work around the fact that the kernel rejects negative timeout values
despite them being valid. */
- if (__builtin_expect (abstime->tv_sec < 0, 0))
+ if (__glibc_unlikely (abstime->tv_sec < 0))
return ETIMEDOUT;
do
#endif
/* Wait. */
- if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
return oldval;
int newval = oldval | FUTEX_WAITERS;
/* If we cannot create the netlink socket we cannot provide
SIGEV_THREAD support. */
- if (__builtin_expect (netlink_socket == -1, 0))
+ if (__glibc_unlikely (netlink_socket == -1))
{
__set_errno (ENOSYS);
return -1;
int retval = INLINE_SYSCALL (mq_notify, 2, mqdes, &se);
/* If it failed, free the allocated memory. */
- if (__builtin_expect (retval != 0, 0))
+ if (__glibc_unlikely (retval != 0))
free (data.attr);
return retval;
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
- if (__builtin_expect (__val > 1, 0)) \
+ if (__glibc_unlikely (__val > 1)) \
lll_futex_wake (__futex, 1, private); \
}))
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
- if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
lll_futex_wake (__futex, 1, private); \
}))
fork function temporarily invalidated the PID field. Adjust for
that. */
pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
- if (__builtin_expect (pid < 0, 0))
+ if (__glibc_unlikely (pid < 0))
pid = -pid;
return INLINE_SYSCALL (tgkill, 3, pid, THREAD_GETMEM (THREAD_SELF, tid),
if a thread exits between ESRCH test and tgkill, we might return
EINVAL, because pd->tid would be cleared by the kernel. */
pid_t tid = atomic_forced_read (pd->tid);
- if (__builtin_expect (tid <= 0, 0))
+ if (__glibc_unlikely (tid <= 0))
/* Not a valid thread handle. */
return ESRCH;
INTERNAL_SYSCALL_DECL (err);
int res;
- if (__builtin_expect (__kernel_cpumask_size == 0, 0))
+ if (__glibc_unlikely (__kernel_cpumask_size == 0))
{
res = __determine_cpumask_size (pd->tid);
if (res != 0)
if a thread exits between ESRCH test and tgkill, we might return
EINVAL, because pd->tid would be cleared by the kernel. */
pid_t tid = atomic_forced_read (pd->tid);
- if (__builtin_expect (tid <= 0, 0))
+ if (__glibc_unlikely (tid <= 0))
/* Not a valid thread handle. */
return ESRCH;
/* raise is an async-safe function. It could be called while the
fork/vfork function temporarily invalidated the PID field. Adjust for
that. */
- if (__builtin_expect (pid <= 0, 0))
+ if (__glibc_unlikely (pid <= 0))
pid = (pid & INT_MAX) == 0 ? selftid : -pid;
return INLINE_SYSCALL (tgkill, 3, pid, selftid, sig);
__attribute__ ((always_inline))
__lll_lock (int *futex, int private)
{
- if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+ if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 1, 0)))
{
if (__builtin_constant_p (private) && private == LLL_PRIVATE)
__lll_lock_wait_private (futex);
__attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
- if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 2, 0), 0))
+ if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 2, 0)))
__lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int result = 0;
- if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+ if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 1, 0)))
result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
int *__futexp = (futex); \
\
lll_compare_and_swap (__futexp, __oldval, __newval, "slr %2,%2"); \
- if (__builtin_expect (__oldval > 1, 0)) \
+ if (__glibc_unlikely (__oldval > 1)) \
lll_futex_wake (__futexp, 1, private); \
})
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
int *__futexp = (futex); \
\
lll_compare_and_swap (__futexp, __oldval, __newval, "slr %2,%2"); \
- if (__builtin_expect (__oldval & FUTEX_WAITERS, 0)) \
+ if (__glibc_unlikely (__oldval & FUTEX_WAITERS)) \
lll_futex_wake (__futexp, 1, private); \
})
#define lll_robust_unlock(futex, private) \
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
- if (__builtin_expect (val != 0, 0))
+ if (__glibc_unlikely (val != 0))
{
if (__builtin_constant_p (private) && private == LLL_PRIVATE)
__lll_lock_wait_private (futex);
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
- if (__builtin_expect (val != 0, 0))
+ if (__glibc_unlikely (val != 0))
__lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
int result = 0;
- if (__builtin_expect (val != 0, 0))
+ if (__glibc_unlikely (val != 0))
result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_24_rel (__futex, 0); \
- if (__builtin_expect (__val > 1, 0)) \
+ if (__glibc_unlikely (__val > 1)) \
lll_futex_wake (__futex, 1, private); \
}))
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
- if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
lll_futex_wake (__futex, 1, private); \
}))
lll_lock (ibarrier->b.lock, private);
- if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
+ if (__glibc_likely (ibarrier->b.left == ibarrier->b.init_count))
/* The barrier is not used anymore. */
result = 0;
else
{
union sparc_pthread_barrier *ibarrier;
- if (__builtin_expect (count == 0, 0))
+ if (__glibc_unlikely (count == 0))
return EINVAL;
struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
unsigned int value;
{
/* Parameter sanity check. */
- if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+ if (__glibc_unlikely (value > SEM_VALUE_MAX))
{
__set_errno (EINVAL);
return -1;
unsigned int value;
{
/* Parameter sanity check. */
- if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+ if (__glibc_unlikely (value > SEM_VALUE_MAX))
{
__set_errno (EINVAL);
return -1;
break;
}
- if (__builtin_expect (i == OLD_TIMER_MAX, 0))
+ if (__glibc_unlikely (i == OLD_TIMER_MAX))
{
/* No free slot. */
(void) __timer_delete_new (newp);
adj))
do_longjump = 1;
- if (__builtin_expect (curp != NULL, 0))
+ if (__glibc_unlikely (curp != NULL))
{
/* Handle the compatibility stuff. Execute all handlers
registered with the old method which would be unwound by this
char strdata[0];
} *dataset = NULL;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s\" in hosts cache!"), (char *) key);
struct dataset *newp
= (struct dataset *) mempool_alloc (db, total + req->key_len,
1);
- if (__builtin_expect (newp != NULL, 1))
+ if (__glibc_likely (newp != NULL))
{
/* Adjust pointer into the memory block. */
key_copy = (char *) newp + (key_copy - (char *) dataset);
bool first, struct database_dyn *table,
uid_t owner, bool prune_wakeup)
{
- if (__builtin_expect (debug_level >= 2, 0))
+ if (__glibc_unlikely (debug_level >= 2))
{
const char *str;
char buf[INET6_ADDRSTRLEN + 1];
bool *mark;
size_t memory_needed = cnt * sizeof (bool);
bool mark_use_alloca;
- if (__builtin_expect (memory_needed <= MAX_STACK_USE, 1))
+ if (__glibc_likely (memory_needed <= MAX_STACK_USE))
{
mark = alloca (cnt * sizeof (bool));
memset (mark, '\0', memory_needed);
char *const data = table->data;
bool any = false;
- if (__builtin_expect (debug_level > 2, 0))
+ if (__glibc_unlikely (debug_level > 2))
dbg_log (_("pruning %s cache; time %ld"),
dbnames[table - dbs], (long int) now);
struct datahead *dh = (struct datahead *) (data + runp->packet);
/* Some debug support. */
- if (__builtin_expect (debug_level > 2, 0))
+ if (__glibc_unlikely (debug_level > 2))
{
char buf[INET6_ADDRSTRLEN];
const char *str;
}
while (cnt > 0);
- if (__builtin_expect (fd != -1, 0))
+ if (__glibc_unlikely (fd != -1))
{
/* Reply to the INVALIDATE initiator that the cache has been
invalidated. */
/* Now we have to get the write lock since we are about to modify
the table. */
- if (__builtin_expect (pthread_rwlock_trywrlock (&table->lock) != 0, 0))
+ if (__glibc_unlikely (pthread_rwlock_trywrlock (&table->lock) != 0))
{
++table->head->wrlockdelayed;
pthread_rwlock_wrlock (&table->lock);
MS_ASYNC);
/* One extra pass if we do debugging. */
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
struct hashentry *runp = head;
}
}
- if (__builtin_expect (! mark_use_alloca, 0))
+ if (__glibc_unlikely (! mark_use_alloca))
free (mark);
/* Run garbage collection if any entry has been removed or replaced. */
if (! dbs[dbidx].enabled || ! dbs[dbidx].check_file)
return;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
dbg_log (_("register trace file %s for database %s"),
finfo->fname, dbnames[dbidx]);
#endif
(void) TEMP_FAILURE_RETRY (sendmsg (fd, &msg, MSG_NOSIGNAL));
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
dbg_log (_("provide access to FD %d, for %s"), db->ro_fd, key);
}
#endif /* SCM_RIGHTS */
}
/* Is this service enabled? */
- if (__builtin_expect (!db->enabled, 0))
+ if (__glibc_unlikely (!db->enabled))
{
/* No, sent the prepared record. */
if (TEMP_FAILURE_RETRY (send (fd, db->disabled_iov->iov_base,
}
/* Be sure we can read the data. */
- if (__builtin_expect (pthread_rwlock_tryrdlock (&db->lock) != 0, 0))
+ if (__glibc_unlikely (pthread_rwlock_tryrdlock (&db->lock) != 0))
{
++db->head->rdlockdelayed;
pthread_rwlock_rdlock (&db->lock);
ssize_t nwritten;
#ifdef HAVE_SENDFILE
- if (__builtin_expect (db->mmap_used, 1))
+ if (__glibc_likely (db->mmap_used))
{
assert (db->wr_fd != -1);
assert ((char *) cached->data > (char *) db->data);
dbs[my_number].head->timestamp = now;
struct timespec prune_ts;
- if (__builtin_expect (clock_gettime (timeout_clock, &prune_ts) == -1, 0))
+ if (__glibc_unlikely (clock_gettime (timeout_clock, &prune_ts) == -1))
/* Should never happen. */
abort ();
we need to wake up occasionally to update the timestamp.
Wait 90% of the update period. */
#define UPDATE_MAPPING_TIMEOUT (MAPPING_TIMEOUT * 9 / 10)
- if (__builtin_expect (! dont_need_update, 0))
+ if (__glibc_unlikely (! dont_need_update))
{
next_wait = MIN (UPDATE_MAPPING_TIMEOUT, next_wait);
dbs[my_number].head->timestamp = now;
#ifdef SO_PEERCRED
pid_t pid = 0;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
struct ucred caller;
socklen_t optlen = sizeof (caller);
}
bool do_signal = true;
- if (__builtin_expect (nready == 0, 0))
+ if (__glibc_unlikely (nready == 0))
{
++client_queued;
do_signal = false;
sizeof (inev)));
if (nb < (ssize_t) sizeof (struct inotify_event))
{
- if (__builtin_expect (nb == -1 && errno != EAGAIN, 0))
+ if (__glibc_unlikely (nb == -1 && errno != EAGAIN))
{
/* Something went wrong when reading the inotify
data. Better disable inotify. */
bool use_malloc = false;
int errval = 0;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s\" in group cache!"), keystr);
{
errno = 0;
- if (__builtin_expect (buflen > 32768, 0))
+ if (__glibc_unlikely (buflen > 32768))
{
char *old_buffer = buffer;
buflen *= 2;
int errval = 0;
int32_t ttl = INT32_MAX;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
const char *str;
char buf[INET6_ADDRSTRLEN + 1];
{
errno = 0;
- if (__builtin_expect (buflen > 32768, 0))
+ if (__glibc_unlikely (buflen > 32768))
{
char *old_buffer = buffer;
buflen *= 2;
char strdata[0];
} *dataset = NULL;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s\" in group cache!"), (char *) key);
mempool_alloc. */
// XXX This really should use alloca. need to change the backends.
gid_t *groups = (gid_t *) malloc (size * sizeof (gid_t));
- if (__builtin_expect (groups == NULL, 0))
+ if (__glibc_unlikely (groups == NULL))
/* No more memory. */
goto out;
/* In prune_cache we are also using a dynamically allocated array.
If the array in the caller is too large we have malloc'ed it. */
size_t stack_used = sizeof (bool) * db->head->module;
- if (__builtin_expect (stack_used > MAX_STACK_USE, 0))
+ if (__glibc_unlikely (stack_used > MAX_STACK_USE))
stack_used = 0;
size_t nmark = (db->head->first_free / BLOCK_ALIGN + BITS - 1) / BITS;
size_t memory_needed = nmark * sizeof (BITMAP_T);
- if (__builtin_expect (stack_used + memory_needed <= MAX_STACK_USE, 1))
+ if (__glibc_likely (stack_used + memory_needed <= MAX_STACK_USE))
{
mark = (BITMAP_T *) alloca_account (memory_needed, stack_used);
mark_use_malloc = false;
struct hashentry **he;
struct hashentry **he_data;
bool he_use_malloc;
- if (__builtin_expect (stack_used + memory_needed <= MAX_STACK_USE, 1))
+ if (__glibc_likely (stack_used + memory_needed <= MAX_STACK_USE))
{
he = alloca_account (memory_needed, stack_used);
he_use_malloc = false;
}
while (runp != moves->next);
- if (__builtin_expect (debug_level >= 3, 0))
+ if (__glibc_unlikely (debug_level >= 3))
dbg_log (_("freed %zu bytes in %s cache"),
db->head->first_free
- ((char *) moves->to + moves->size - db->data),
db->head->first_free = (char *) moves->to + moves->size - db->data;
/* Consistency check. */
- if (__builtin_expect (debug_level >= 3, 0))
+ if (__glibc_unlikely (debug_level >= 3))
{
for (size_t idx = 0; idx < db->head->module; ++idx)
{
retry:
res = db->data + db->head->first_free;
- if (__builtin_expect (db->head->first_free + len > db->head->data_size, 0))
+ if (__glibc_unlikely (db->head->first_free + len > db->head->data_size))
{
if (! tried_resize)
{
const char *key, uid_t uid, struct hashentry *he,
struct datahead *dh, struct dataset **resultp)
{
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s\" in netgroup cache!"), key);
{
struct dataset *newp
= (struct dataset *) mempool_alloc (db, total + req->key_len, 1);
- if (__builtin_expect (newp != NULL, 1))
+ if (__glibc_likely (newp != NULL))
{
/* Adjust pointer into the memory block. */
key_copy = (char *) newp + (key_copy - buffer);
key = (char *) rawmemchr (key, '\0') + 1;
const char *domain = *key++ ? key : NULL;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s (%s,%s,%s)\" in netgroup cache!"),
1);
struct indataset dataset_mem;
bool cacheable = true;
- if (__builtin_expect (dataset == NULL, 0))
+ if (__glibc_unlikely (dataset == NULL))
{
cacheable = false;
dataset = &dataset_mem;
1, 0) != 0, 0))
{
// XXX Best number of rounds?
- if (__builtin_expect (++cnt > 5, 0))
+ if (__glibc_unlikely (++cnt > 5))
return false;
atomic_delay ();
if (map != NO_MAPPING)
{
int now_cycle = map->head->gc_cycle;
- if (__builtin_expect (now_cycle != *gc_cycle, 0))
+ if (__glibc_unlikely (now_cycle != *gc_cycle))
{
/* We might have read inconsistent data. */
*gc_cycle = now_cycle;
int
__nscd_getai (const char *key, struct nscd_ai_result **result, int *h_errnop)
{
- if (__builtin_expect (__nss_have_localdomain >= 0, 0))
+ if (__glibc_unlikely (__nss_have_localdomain >= 0))
{
if (__nss_have_localdomain == 0)
__nss_have_localdomain = getenv ("LOCALDOMAIN") != NULL ? 1 : -1;
}
else
{
- if (__builtin_expect (ai_resp.found == -1, 0))
+ if (__glibc_unlikely (ai_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_hosts = 1;
/* No value found so far. */
*result = NULL;
- if (__builtin_expect (gr_resp.found == -1, 0))
+ if (__glibc_unlikely (gr_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_group = 1;
& (__alignof__ (char *) - 1));
total_len = (align + (1 + gr_resp.gr_mem_cnt) * sizeof (char *)
+ gr_resp.gr_name_len + gr_resp.gr_passwd_len);
- if (__builtin_expect (buflen < total_len, 0))
+ if (__glibc_unlikely (buflen < total_len))
{
no_room:
__set_errno (ERANGE);
if (gr_name == NULL)
{
/* Handle a simple, usual case: no group members. */
- if (__builtin_expect (gr_resp.gr_mem_cnt == 0, 1))
+ if (__glibc_likely (gr_resp.gr_mem_cnt == 0))
{
size_t n = gr_resp.gr_name_len + gr_resp.gr_passwd_len;
if (__builtin_expect (__readall (sock, resultbuf->gr_name, n)
/* Get this data. */
size_t n = __readvall (sock, vec, 2);
- if (__builtin_expect (n != total_len, 0))
+ if (__glibc_unlikely (n != total_len))
goto out_close;
}
}
p += len[cnt];
}
- if (__builtin_expect (gr_name + gr_name_len + total_len > recend, 0))
+ if (__glibc_unlikely (gr_name + gr_name_len + total_len > recend))
{
/* len array might contain garbage during nscd GC cycle,
retry rather than fail in that case. */
retval = -2;
goto out_close;
}
- if (__builtin_expect (total_len > buflen, 0))
+ if (__glibc_unlikely (total_len > buflen))
{
/* len array might contain garbage during nscd GC cycle,
retry rather than fail in that case. */
struct hostent *resultbuf, char *buffer, size_t buflen,
struct hostent **result, int *h_errnop)
{
- if (__builtin_expect (__nss_have_localdomain >= 0, 0))
+ if (__glibc_unlikely (__nss_have_localdomain >= 0))
{
if (__nss_have_localdomain == 0)
__nss_have_localdomain = getenv ("LOCALDOMAIN") != NULL ? 1 : -1;
/* No value found so far. */
*result = NULL;
- if (__builtin_expect (hst_resp.found == -1, 0))
+ if (__glibc_unlikely (hst_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_hosts = 1;
goto out_close;
}
/* See whether this would exceed the buffer capacity. */
- if (__builtin_expect (cp > buffer + buflen, 0))
+ if (__glibc_unlikely (cp > buffer + buflen))
{
/* aliases_len array might contain garbage during nscd GC cycle,
retry rather than fail in that case. */
/* No value found so far. */
*result = NULL;
- if (__builtin_expect (pw_resp.found == -1, 0))
+ if (__glibc_unlikely (pw_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_passwd = 1;
p += pw_resp.pw_shell_len;
ssize_t total = p - buffer;
- if (__builtin_expect (pw_name + total > recend, 0))
+ if (__glibc_unlikely (pw_name + total > recend))
goto out_close;
- if (__builtin_expect (buflen < total, 0))
+ if (__glibc_unlikely (buflen < total))
{
__set_errno (ERANGE);
retval = ERANGE;
{
ssize_t nbytes = __readall (sock, buffer, total);
- if (__builtin_expect (nbytes != total, 0))
+ if (__glibc_unlikely (nbytes != total))
{
/* The `errno' to some value != ERANGE. */
__set_errno (ENOENT);
/* No value found so far. */
*result = NULL;
- if (__builtin_expect (serv_resp.found == -1, 0))
+ if (__glibc_unlikely (serv_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_services = 1;
}
/* See whether this would exceed the buffer capacity. */
- if (__builtin_expect (cp > buf + buflen, 0))
+ if (__glibc_unlikely (cp > buf + buflen))
{
/* aliases_len array might contain garbage during nscd GC cycle,
retry rather than fail in that case. */
ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
if (ret <= 0)
{
- if (__builtin_expect (ret == 0 || errno != EAGAIN, 1))
+ if (__glibc_likely (ret == 0 || errno != EAGAIN))
/* A genuine error or no data to read. */
return ret;
ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, &reqdata,
real_sizeof_reqdata,
MSG_NOSIGNAL));
- if (__builtin_expect (wres == (ssize_t) real_sizeof_reqdata, 1))
+ if (__glibc_likely (wres == (ssize_t) real_sizeof_reqdata))
/* We managed to send the request. */
return sock;
int *ip = (void *) CMSG_DATA (cmsg);
mapfd = *ip;
- if (__builtin_expect (n != keylen && n != keylen + sizeof (mapsize), 0))
+ if (__glibc_unlikely (n != keylen && n != keylen + sizeof (mapsize)))
goto out_close;
- if (__builtin_expect (strcmp (resdata, key) != 0, 0))
+ if (__glibc_unlikely (strcmp (resdata, key) != 0))
goto out_close;
- if (__builtin_expect (n == keylen, 0))
+ if (__glibc_unlikely (n == keylen))
{
struct stat64 st;
if (__builtin_expect (fstat64 (mapfd, &st) != 0, 0)
/* The file is large enough, map it now. */
void *mapping = __mmap (NULL, mapsize, PROT_READ, MAP_SHARED, mapfd, 0);
- if (__builtin_expect (mapping != MAP_FAILED, 1))
+ if (__glibc_likely (mapping != MAP_FAILED))
{
/* Check whether the database is correct and up-to-date. */
struct database_pers_head *head = mapping;
ALIGN)
+ head->data_size);
- if (__builtin_expect (mapsize < size, 0))
+ if (__glibc_unlikely (mapsize < size))
goto out_unmap;
/* Allocate a record for the mapping. */
cur = mapptr->mapped;
- if (__builtin_expect (cur != NO_MAPPING, 1))
+ if (__glibc_likely (cur != NO_MAPPING))
{
/* If not mapped or timestamp not updated, request new map. */
if (cur == NULL
cur = __nscd_get_mapping (type, name,
(struct mapped_database **) &mapptr->mapped);
- if (__builtin_expect (cur != NO_MAPPING, 1))
+ if (__glibc_likely (cur != NO_MAPPING))
{
if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
0))
}
else
{
- if (__builtin_expect (initgr_resp.found == -1, 0))
+ if (__glibc_unlikely (initgr_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_group = 1;
}
else
{
- if (__builtin_expect (netgroup_resp.found == -1, 0))
+ if (__glibc_unlikely (netgroup_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_netgroup = 1;
retval = innetgroup_resp.result;
else
{
- if (__builtin_expect (innetgroup_resp.found == -1, 0))
+ if (__glibc_unlikely (innetgroup_resp.found == -1))
{
/* The daemon does not cache this database. */
__nss_not_use_nscd_netgroup = 1;
bool use_malloc = false;
int errval = 0;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s\" in password cache!"), keystr);
{
errno = 0;
- if (__builtin_expect (buflen > 32768, 0))
+ if (__glibc_unlikely (buflen > 32768))
{
char *old_buffer = buffer;
buflen *= 2;
cap_free (tmp_caps);
- if (__builtin_expect (res != 0, 0))
+ if (__glibc_unlikely (res != 0))
{
cap_free (new_caps);
dbg_log (_("Failed to drop capabilities"));
bool use_malloc = false;
int errval = 0;
- if (__builtin_expect (debug_level > 0, 0))
+ if (__glibc_unlikely (debug_level > 0))
{
if (he == NULL)
dbg_log (_("Haven't found \"%s\" in services cache!"), key);
{
errno = 0;
- if (__builtin_expect (buflen > 32768, 0))
+ if (__glibc_unlikely (buflen > 32768))
{
char *old_buffer = buffer;
buflen *= 2;
|| ! (parse_result = parse_line (p, result, data, buflen, errnop
EXTRA_ARGS)));
- if (__builtin_expect (parse_result == -1, 0))
+ if (__glibc_unlikely (parse_result == -1))
{
H_ERRNO_SET (NETDB_INTERNAL);
return NSS_STATUS_TRYAGAIN;
#else
n = strlen (pattern);
#endif
- if (__builtin_expect (n < 1024, 1))
+ if (__glibc_likely (n < 1024))
{
wpattern = (wchar_t *) alloca_account ((n + 1) * sizeof (wchar_t),
alloca_used);
n = mbsrtowcs (wpattern, &p, n + 1, &ps);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
/* Something wrong.
XXX Do we have to set `errno' to something which mbsrtows hasn't
already done? */
{
prepare_wpattern:
n = mbsrtowcs (NULL, &pattern, 0, &ps);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
/* Something wrong.
XXX Do we have to set `errno' to something which mbsrtows hasn't
already done? */
return -1;
- if (__builtin_expect (n >= (size_t) -1 / sizeof (wchar_t), 0))
+ if (__glibc_unlikely (n >= (size_t) -1 / sizeof (wchar_t)))
{
__set_errno (ENOMEM);
return -2;
n = strlen (string);
#endif
p = string;
- if (__builtin_expect (n < 1024, 1))
+ if (__glibc_likely (n < 1024))
{
wstring = (wchar_t *) alloca_account ((n + 1) * sizeof (wchar_t),
alloca_used);
n = mbsrtowcs (wstring, &p, n + 1, &ps);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
{
/* Something wrong.
XXX Do we have to set `errno' to something which
{
prepare_wstring:
n = mbsrtowcs (NULL, &string, 0, &ps);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
/* Something wrong.
XXX Do we have to set `errno' to something which mbsrtows hasn't
already done? */
goto free_return;
- if (__builtin_expect (n >= (size_t) -1 / sizeof (wchar_t), 0))
+ if (__glibc_unlikely (n >= (size_t) -1 / sizeof (wchar_t)))
{
free (wpattern_malloc);
__set_errno (ENOMEM);
fputc_unlocked ('\n', fp);
- if (__builtin_expect (fclose (fp) != EOF, 1))
+ if (__glibc_likely (fclose (fp) != EOF))
{
_IO_flockfile (stderr);
/* It is an illegal expression. */
illegal_brace:
#ifdef _LIBC
- if (__builtin_expect (!alloca_onealt, 0))
+ if (__glibc_unlikely (!alloca_onealt))
#endif
free (onealt);
return glob (pattern, flags & ~GLOB_BRACE, errfunc, pglob);
if (result && result != GLOB_NOMATCH)
{
#ifdef _LIBC
- if (__builtin_expect (!alloca_onealt, 0))
+ if (__glibc_unlikely (!alloca_onealt))
#endif
free (onealt);
if (!(flags & GLOB_APPEND))
}
#ifdef _LIBC
- if (__builtin_expect (!alloca_onealt, 0))
+ if (__glibc_unlikely (!alloca_onealt))
#endif
free (onealt);
}
else
{
- if (__builtin_expect (pattern[0] == '\0', 0))
+ if (__glibc_unlikely (pattern[0] == '\0'))
{
dirs.gl_pathv = NULL;
goto no_matches;
2 * pwbuflen);
if (newp == NULL)
{
- if (__builtin_expect (malloc_pwtmpbuf, 0))
+ if (__glibc_unlikely (malloc_pwtmpbuf))
free (pwtmpbuf);
retval = GLOB_NOSPACE;
goto out;
{
if (flags & GLOB_TILDE_CHECK)
{
- if (__builtin_expect (malloc_home_dir, 0))
+ if (__glibc_unlikely (malloc_home_dir))
free (home_dir);
retval = GLOB_NOMATCH;
goto out;
/* Now construct the full directory. */
if (dirname[1] == '\0')
{
- if (__builtin_expect (malloc_dirname, 0))
+ if (__glibc_unlikely (malloc_dirname))
free (dirname);
dirname = home_dir;
newp = malloc (home_len + dirlen);
if (newp == NULL)
{
- if (__builtin_expect (malloc_home_dir, 0))
+ if (__glibc_unlikely (malloc_home_dir))
free (home_dir);
retval = GLOB_NOSPACE;
goto out;
mempcpy (mempcpy (newp, home_dir, home_len),
&dirname[1], dirlen);
- if (__builtin_expect (malloc_dirname, 0))
+ if (__glibc_unlikely (malloc_dirname))
free (dirname);
dirname = newp;
if (pwtmpbuf == NULL)
{
nomem_getpw:
- if (__builtin_expect (malloc_user_name, 0))
+ if (__glibc_unlikely (malloc_user_name))
free (user_name);
retval = GLOB_NOSPACE;
goto out;
2 * buflen);
if (newp == NULL)
{
- if (__builtin_expect (malloc_pwtmpbuf, 0))
+ if (__glibc_unlikely (malloc_pwtmpbuf))
free (pwtmpbuf);
goto nomem_getpw;
}
p = getpwnam (user_name);
# endif
- if (__builtin_expect (malloc_user_name, 0))
+ if (__glibc_unlikely (malloc_user_name))
free (user_name);
/* If we found a home directory use this. */
size_t home_len = strlen (p->pw_dir);
size_t rest_len = end_name == NULL ? 0 : strlen (end_name);
- if (__builtin_expect (malloc_dirname, 0))
+ if (__glibc_unlikely (malloc_dirname))
free (dirname);
malloc_dirname = 0;
dirname = malloc (home_len + rest_len + 1);
if (dirname == NULL)
{
- if (__builtin_expect (malloc_pwtmpbuf, 0))
+ if (__glibc_unlikely (malloc_pwtmpbuf))
free (pwtmpbuf);
retval = GLOB_NOSPACE;
goto out;
dirlen = home_len + rest_len;
dirname_modified = 1;
- if (__builtin_expect (malloc_pwtmpbuf, 0))
+ if (__glibc_unlikely (malloc_pwtmpbuf))
free (pwtmpbuf);
}
else
{
- if (__builtin_expect (malloc_pwtmpbuf, 0))
+ if (__glibc_unlikely (malloc_pwtmpbuf))
free (pwtmpbuf);
if (flags & GLOB_TILDE_CHECK)
*(char *) &dirname[--dirlen] = '\0';
}
- if (__builtin_expect ((flags & GLOB_ALTDIRFUNC) != 0, 0))
+ if (__glibc_unlikely ((flags & GLOB_ALTDIRFUNC) != 0))
{
/* Use the alternative access functions also in the recursive
call. */
}
out:
- if (__builtin_expect (malloc_dirname, 0))
+ if (__glibc_unlikely (malloc_dirname))
free (dirname);
return retval;
of the function to copy this name into the result. */
flags |= GLOB_NOCHECK;
- if (__builtin_expect (!alloca_fullname, 0))
+ if (__glibc_unlikely (!alloca_fullname))
free (fullname);
}
else
}
d64buf;
- if (__builtin_expect (flags & GLOB_ALTDIRFUNC, 0))
+ if (__glibc_unlikely (flags & GLOB_ALTDIRFUNC))
{
struct dirent *d32 = (*pglob->gl_readdir) (stream);
if (d32 != NULL)
if (stream != NULL)
{
save = errno;
- if (__builtin_expect (flags & GLOB_ALTDIRFUNC, 0))
+ if (__glibc_unlikely (flags & GLOB_ALTDIRFUNC))
(*pglob->gl_closedir) (stream);
else
closedir (stream);
args[1] = "-nc";
/* Redirect output. */
- if (__builtin_expect (fildes[1] != STDOUT_FILENO, 1))
+ if (__glibc_likely (fildes[1] != STDOUT_FILENO))
{
__dup2 (fildes[1], STDOUT_FILENO);
__close (fildes[1]);
buffer += pad;
buflen = buflen > pad ? buflen - pad : 0;
- if (__builtin_expect (buflen < sizeof (struct host_data), 0))
+ if (__glibc_unlikely (buflen < sizeof (struct host_data)))
{
*errnop = ERANGE;
*h_errnop = NETDB_INTERNAL;
break;
case AF_INET6:
/* Only lookup with the byte string format if the user wants it. */
- if (__builtin_expect (_res.options & RES_USEBSTRING, 0))
+ if (__glibc_unlikely (_res.options & RES_USEBSTRING))
{
qp = stpcpy (qbuf, "\\[x");
for (n = 0; n < IN6ADDRSZ; ++n)
int have_to_map = 0;
uintptr_t pad = -(uintptr_t) buffer % __alignof__ (struct host_data);
buffer += pad;
- if (__builtin_expect (buflen < sizeof (struct host_data) + pad, 0))
+ if (__glibc_unlikely (buflen < sizeof (struct host_data) + pad))
{
/* The buffer is too small. */
too_small:
n = -1;
}
- if (__builtin_expect (n < 0 || (*name_ok) (bp) == 0, 0))
+ if (__glibc_unlikely (n < 0 || (*name_ok) (bp) == 0))
{
++had_error;
continue;
}
cp += n; /* name */
- if (__builtin_expect (cp + 10 > end_of_message, 0))
+ if (__glibc_unlikely (cp + 10 > end_of_message))
{
++had_error;
continue;
cp += INT32SZ; /* TTL */
n = __ns_get16 (cp);
cp += INT16SZ; /* len */
- if (__builtin_expect (class != C_IN, 0))
+ if (__glibc_unlikely (class != C_IN))
{
/* XXX - debug? syslog? */
cp += n;
if (ap >= &host_data->aliases[MAX_NR_ALIASES - 1])
continue;
n = dn_expand (answer->buf, end_of_message, cp, tbuf, sizeof tbuf);
- if (__builtin_expect (n < 0 || (*name_ok) (tbuf) == 0, 0))
+ if (__glibc_unlikely (n < 0 || (*name_ok) (tbuf) == 0))
{
++had_error;
continue;
linebuflen -= n;
/* Get canonical name. */
n = strlen (tbuf) + 1; /* For the \0. */
- if (__builtin_expect (n > linebuflen, 0))
+ if (__glibc_unlikely (n > linebuflen))
goto too_small;
if (__builtin_expect (n, 0) >= MAXHOSTNAMELEN)
{
if (qtype == T_PTR && type == T_CNAME)
{
n = dn_expand (answer->buf, end_of_message, cp, tbuf, sizeof tbuf);
- if (__builtin_expect (n < 0 || res_dnok (tbuf) == 0, 0))
+ if (__glibc_unlikely (n < 0 || res_dnok (tbuf) == 0))
{
++had_error;
continue;
cp += n;
/* Get canonical name. */
n = strlen (tbuf) + 1; /* For the \0. */
- if (__builtin_expect (n > linebuflen, 0))
+ if (__glibc_unlikely (n > linebuflen))
goto too_small;
if (__builtin_expect (n, 0) >= MAXHOSTNAMELEN)
{
if (type == T_A && qtype == T_AAAA && map)
have_to_map = 1;
- else if (__builtin_expect (type != qtype, 0))
+ else if (__glibc_unlikely (type != qtype))
{
syslog (LOG_NOTICE | LOG_AUTH,
"gethostby*.getanswer: asked for \"%s %s %s\", got type \"%s\"",
switch (type)
{
case T_PTR:
- if (__builtin_expect (strcasecmp (tname, bp) != 0, 0))
+ if (__glibc_unlikely (strcasecmp (tname, bp) != 0))
{
syslog (LOG_NOTICE | LOG_AUTH, AskedForGot, qname, bp);
cp += n;
n = -1;
}
- if (__builtin_expect (n < 0 || res_hnok (bp) == 0, 0))
+ if (__glibc_unlikely (n < 0 || res_hnok (bp) == 0))
{
++had_error;
break;
if (have_to_map)
{
n = strlen (bp) + 1; /* for the \0 */
- if (__builtin_expect (n >= MAXHOSTNAMELEN, 0))
+ if (__glibc_unlikely (n >= MAXHOSTNAMELEN))
{
++had_error;
break;
linebuflen -= sizeof (align) - ((u_long) bp % sizeof (align));
bp += sizeof (align) - ((u_long) bp % sizeof (align));
- if (__builtin_expect (n > linebuflen, 0))
+ if (__glibc_unlikely (n > linebuflen))
goto too_small;
bp = __mempcpy (*hap++ = bp, cp, n);
cp += n;
int qdcount = ntohs (hp->qdcount);
const u_char *cp = answer->buf + HFIXEDSZ;
const u_char *end_of_message = answer->buf + anslen;
- if (__builtin_expect (qdcount != 1, 0))
+ if (__glibc_unlikely (qdcount != 1))
{
*h_errnop = NO_RECOVERY;
return NSS_STATUS_UNAVAIL;
n = -1;
}
- if (__builtin_expect (n < 0 || res_hnok (buffer) == 0, 0))
+ if (__glibc_unlikely (n < 0 || res_hnok (buffer) == 0))
{
++had_error;
continue;
cp += n; /* name */
- if (__builtin_expect (cp + 10 > end_of_message, 0))
+ if (__glibc_unlikely (cp + 10 > end_of_message))
{
++had_error;
continue;
*ttlp = ttl;
n = dn_expand (answer->buf, end_of_message, cp, tbuf, sizeof tbuf);
- if (__builtin_expect (n < 0 || res_hnok (tbuf) == 0, 0))
+ if (__glibc_unlikely (n < 0 || res_hnok (tbuf) == 0))
{
++had_error;
continue;
}
n = strlen (tbuf) + 1;
- if (__builtin_expect (n > buflen, 0))
+ if (__glibc_unlikely (n > buflen))
goto too_small;
- if (__builtin_expect (n >= MAXHOSTNAMELEN, 0))
+ if (__glibc_unlikely (n >= MAXHOSTNAMELEN))
{
++had_error;
continue;
// We should not see any types other than those explicitly listed
// below. Some types sent by server seem missing, though. Just
// collect the data for now.
- if (__builtin_expect (type != T_A && type != T_AAAA, 0))
+ if (__glibc_unlikely (type != T_A && type != T_AAAA))
#else
if (__builtin_expect (type == T_SIG, 0)
|| __builtin_expect (type == T_KEY, 0)
uintptr_t pad = -(uintptr_t) buffer % __alignof__ (struct net_data);
buffer += pad;
- if (__builtin_expect (buflen < sizeof (*net_data) + pad, 0))
+ if (__glibc_unlikely (buflen < sizeof (*net_data) + pad))
{
/* The buffer is too small. */
too_small:
sa6->sin6_flowinfo = 0;
sa6->sin6_addr = a6;
- if (__builtin_expect (el == NULL, 1))
+ if (__glibc_likely (el == NULL))
sa6->sin6_scope_id = 0;
else {
int try_numericscope = 1;
n = ns_name_compress((char *)data, cp, buflen,
(const u_char **) dnptrs,
(const u_char **) lastdnptr);
- if (__builtin_expect (n < 0, 0))
+ if (__glibc_unlikely (n < 0))
return (-1);
cp += n;
buflen -= n;
/*
* Initialize answer section
*/
- if (__builtin_expect (buflen < 1 + RRFIXEDSZ + datalen, 0))
+ if (__glibc_unlikely (buflen < 1 + RRFIXEDSZ + datalen))
return (-1);
*cp++ = '\0'; /* no domain name */
NS_PUT16 (type, cp);
goto again;
}
}
- if (__builtin_expect (n <= 0, 0)) {
+ if (__glibc_unlikely (n <= 0)) {
/* If the query choked with EDNS0, retry without EDNS0. */
if ((statp->options & (RES_USE_EDNS0|RES_USE_DNSSEC)) != 0
&& ((oflags ^ statp->_flags) & RES_F_EDNS0ERR) != 0) {
}
#ifdef USE_HOOKS
- if (__builtin_expect (statp->qhook || statp->rhook, 0)) {
+ if (__glibc_unlikely (statp->qhook || statp->rhook)) {
if (anssiz < MAXPACKET && ansp) {
u_char *buf = malloc (MAXPACKET);
if (buf == NULL)
goto next_ns;
same_ns:
#ifdef USE_HOOKS
- if (__builtin_expect (statp->qhook != NULL, 0)) {
+ if (__glibc_unlikely (statp->qhook != NULL)) {
int done = 0, loops = 0;
do {
: &((struct sockaddr_in *) nsap)->sin_addr),
tmpbuf, sizeof (tmpbuf))));
- if (__builtin_expect (v_circuit, 0)) {
+ if (__glibc_unlikely (v_circuit)) {
/* Use VC; at most one attempt per server. */
try = statp->retry;
n = send_vc(statp, buf, buflen, buf2, buflen2,
__res_iclose(statp, false);
}
#ifdef USE_HOOKS
- if (__builtin_expect (statp->rhook, 0)) {
+ if (__glibc_unlikely (statp->rhook)) {
int done = 0, loops = 0;
do {
if (rlen > *thisanssizp) {
/* Yes, we test ANSCP here. If we have two buffers
both will be allocatable. */
- if (__builtin_expect (anscp != NULL, 1)) {
+ if (__glibc_likely (anscp != NULL)) {
u_char *newp = malloc (MAXPACKET);
if (newp == NULL) {
*terrno = ENOMEM;
} else
len = rlen;
- if (__builtin_expect (len < HFIXEDSZ, 0)) {
+ if (__glibc_unlikely (len < HFIXEDSZ)) {
/*
* Undersized message.
*/
cp += n;
len -= n;
}
- if (__builtin_expect (n <= 0, 0)) {
+ if (__glibc_unlikely (n <= 0)) {
*terrno = errno;
Perror(statp, stderr, "read(vc)", errno);
__res_iclose(statp, false);
return (0);
}
- if (__builtin_expect (truncating, 0)) {
+ if (__glibc_unlikely (truncating)) {
/*
* Flush rest of answer so connection stays in synch.
*/
/* only try IPv6 if IPv6 NS and if not failed before */
if (nsap->sa_family == AF_INET6 && !statp->ipv6_unavail) {
- if (__builtin_expect (__have_o_nonblock >= 0, 1)) {
+ if (__glibc_likely (__have_o_nonblock >= 0)) {
EXT(statp).nssocks[ns] =
socket(PF_INET6, SOCK_DGRAM|SOCK_NONBLOCK,
0);
&& errno == EINVAL ? -1 : 1);
#endif
}
- if (__builtin_expect (__have_o_nonblock < 0, 0))
+ if (__glibc_unlikely (__have_o_nonblock < 0))
EXT(statp).nssocks[ns] =
socket(PF_INET6, SOCK_DGRAM, 0);
if (EXT(statp).nssocks[ns] < 0)
statp->ipv6_unavail = errno == EAFNOSUPPORT;
slen = sizeof (struct sockaddr_in6);
} else if (nsap->sa_family == AF_INET) {
- if (__builtin_expect (__have_o_nonblock >= 0, 1)) {
+ if (__glibc_likely (__have_o_nonblock >= 0)) {
EXT(statp).nssocks[ns]
= socket(PF_INET, SOCK_DGRAM|SOCK_NONBLOCK,
0);
&& errno == EINVAL ? -1 : 1);
#endif
}
- if (__builtin_expect (__have_o_nonblock < 0, 0))
+ if (__glibc_unlikely (__have_o_nonblock < 0))
EXT(statp).nssocks[ns]
= socket(PF_INET, SOCK_DGRAM, 0);
slen = sizeof (struct sockaddr_in);
__res_iclose(statp, false);
return (0);
}
- if (__builtin_expect (__have_o_nonblock < 0, 0)) {
+ if (__glibc_unlikely (__have_o_nonblock < 0)) {
/* Make socket non-blocking. */
int fl = __fcntl (EXT(statp).nssocks[ns], F_GETFL);
if (fl != -1)
n = 0;
if (nwritten == 0)
n = __poll (pfd, 1, 0);
- if (__builtin_expect (n == 0, 0)) {
+ if (__glibc_unlikely (n == 0)) {
n = __poll (pfd, 1, ptimeout);
need_recompute = 1;
}
reqs[1].msg_hdr.msg_controllen = 0;
int ndg = __sendmmsg (pfd[0].fd, reqs, 2, MSG_NOSIGNAL);
- if (__builtin_expect (ndg == 2, 1))
+ if (__glibc_likely (ndg == 2))
{
if (reqs[0].msg_len != buflen
|| reqs[1].msg_len != buflen2)
else
{
#ifndef __ASSUME_SENDMMSG
- if (__builtin_expect (have_sendmmsg == 0, 0))
+ if (__glibc_unlikely (have_sendmmsg == 0))
{
if (ndg < 0 && errno == ENOSYS)
{
*thisresplenp = recvfrom(pfd[0].fd, (char*)*thisansp,
*thisanssizp, 0,
(struct sockaddr *)&from, &fromlen);
- if (__builtin_expect (*thisresplenp <= 0, 0)) {
+ if (__glibc_unlikely (*thisresplenp <= 0)) {
if (errno == EINTR || errno == EAGAIN) {
need_recompute = 1;
goto wait;
goto err_out;
}
*gotsomewhere = 1;
- if (__builtin_expect (*thisresplenp < HFIXEDSZ, 0)) {
+ if (__glibc_unlikely (*thisresplenp < HFIXEDSZ)) {
/*
* Undersized message.
*/
wint_t wthousands = __towctrans (L',', map);
#ifndef COMPILE_WPRINTF
- if (__builtin_expect (map != NULL, 0))
+ if (__glibc_unlikely (map != NULL))
{
mbstate_t state;
memset (&state, '\0', sizeof (state));
|| (fd = __dup (fd)) == -1
|| (fp = fdopen (fd, "w+")) == NULL)
{
- if (__builtin_expect (fd != -1, 0))
+ if (__glibc_unlikely (fd != -1))
__close (fd);
/* Use standard error as is. */
/* Write the exponent if it is needed. */
if (type != 'f')
{
- if (__builtin_expect (expsign != 0 && exponent == 4 && spec == 'g', 0))
+ if (__glibc_unlikely (expsign != 0 && exponent == 4 && spec == 'g'))
{
/* This is another special case. The exponent of the number is
really smaller than -4, which requires the 'e'/'E' format.
size_t nbuffer = (2 + chars_needed * factor + decimal_len
+ ngroups * thousands_sep_len);
- if (__builtin_expect (buffer_malloced, 0))
+ if (__glibc_unlikely (buffer_malloced))
{
buffer = (char *) malloc (nbuffer);
if (buffer == NULL)
}
tmpptr = buffer;
- if (__builtin_expect (info->i18n, 0))
+ if (__glibc_unlikely (info->i18n))
{
#ifdef COMPILE_WPRINTF
wstartp = _i18n_number_rewrite (wstartp, wcp,
PRINT (tmpptr, wstartp, wide ? wcp - wstartp : cp - tmpptr);
/* Free the memory if necessary. */
- if (__builtin_expect (buffer_malloced, 0))
+ if (__glibc_unlikely (buffer_malloced))
{
free (buffer);
free (wbuffer);
0))
return NULL;
- if (__builtin_expect (__gen_tempname (tmpbuf, 0, 0, __GT_NOCREATE), 0))
+ if (__glibc_unlikely (__gen_tempname (tmpbuf, 0, 0, __GT_NOCREATE)))
return NULL;
if (s == NULL)
#define ADDW(Ch) \
do \
{ \
- if (__builtin_expect (wpsize == wpmax, 0)) \
+ if (__glibc_unlikely (wpsize == wpmax)) \
{ \
CHAR_T *old = wp; \
size_t newsize = (UCHAR_MAX + 1 > 2 * wpmax \
do
{
c = inchar ();
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
else if (c != (unsigned char) *f++)
{
c = inchar ();
/* Characters other than format specs must just match. */
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
/* We saw white space char as the last character in the format
if (skip_space)
{
while (ISSPACE (c))
- if (__builtin_expect (inchar () == EOF, 0))
+ if (__glibc_unlikely (inchar () == EOF))
input_error ();
skip_space = 0;
}
- if (__builtin_expect (c != fc, 0))
+ if (__glibc_unlikely (c != fc))
{
ungetc (c, s);
conv_error ();
}
/* End of the format string? */
- if (__builtin_expect (*f == L_('\0'), 0))
+ if (__glibc_unlikely (*f == L_('\0')))
conv_error ();
/* Find the conversion specifier. */
{
case L_('%'): /* Must match a literal '%'. */
c = inchar ();
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
- if (__builtin_expect (c != fc, 0))
+ if (__glibc_unlikely (c != fc))
{
ungetc_not_eof (c, s);
conv_error ();
#endif
c = inchar ();
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
#ifdef COMPILE_WSCANF
}
n = __wcrtomb (!(flags & SUPPRESS) ? str : NULL, c, &state);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
/* No valid wide character. */
input_error ();
STRING_ARG (wstr, wchar_t, (width > 1024 ? 1024 : width));
c = inchar ();
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
#ifdef COMPILE_WSCANF
{
/* Possibly correct character, just not enough
input. */
- if (__builtin_expect (inchar () == EOF, 0))
+ if (__glibc_unlikely (inchar () == EOF))
encode_error ();
buf[0] = c;
continue;
}
- if (__builtin_expect (n != 1, 0))
+ if (__glibc_unlikely (n != 1))
encode_error ();
/* We have a match. */
STRING_ARG (str, char, 100);
c = inchar ();
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
#ifdef COMPILE_WSCANF
n = __wcrtomb (!(flags & SUPPRESS) ? str : NULL, c,
&state);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
encode_error ();
assert (n <= MB_CUR_MAX);
{
/* Possibly correct character, just not enough
input. */
- if (__builtin_expect (inchar () == EOF, 0))
+ if (__glibc_unlikely (inchar () == EOF))
encode_error ();
buf[0] = c;
continue;
}
- if (__builtin_expect (n != 1, 0))
+ if (__glibc_unlikely (n != 1))
encode_error ();
/* We have a match. */
number:
c = inchar ();
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
/* Check for a sign. */
#endif
/* Get the alternative digit forms if there are any. */
- if (__builtin_expect (map != NULL, 0))
+ if (__glibc_unlikely (map != NULL))
{
/* Adding new level for extra digits set in locale file. */
++to_level;
{
/* Get the string for the digits with value N. */
#ifdef COMPILE_WSCANF
- if (__builtin_expect (map != NULL, 0))
+ if (__glibc_unlikely (map != NULL))
wcdigits[n] = wcdigits_extended[n];
else
wcdigits[n] = (const wchar_t *)
const char *cmpp;
int avail = width > 0 ? width : INT_MAX;
- if (__builtin_expect (map != NULL, 0))
+ if (__glibc_unlikely (map != NULL))
mbdigits[n] = mbdigits_extended[n];
else
mbdigits[n]
else
num.ul = __strtoul_internal (wp, &tw, base, flags & GROUP);
}
- if (__builtin_expect (wp == tw, 0))
+ if (__glibc_unlikely (wp == tw))
conv_error ();
if (!(flags & SUPPRESS))
c = inchar ();
if (width > 0)
--width;
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
input_error ();
got_digit = got_dot = got_e = 0;
if (c == L_('-') || c == L_('+'))
{
negative = c == L_('-');
- if (__builtin_expect (width == 0 || inchar () == EOF, 0))
+ if (__glibc_unlikely (width == 0 || inchar () == EOF))
/* EOF is only an input error before we read any chars. */
conv_error ();
if (width > 0)
*ARG (float *) = negative ? -d : d;
}
- if (__builtin_expect (tw == wp, 0))
+ if (__glibc_unlikely (tw == wp))
conv_error ();
if (!(flags & SUPPRESS))
while ((fc = *f++) != L'\0' && fc != L']');
- if (__builtin_expect (fc == L'\0', 0))
+ if (__glibc_unlikely (fc == L'\0'))
conv_error ();
wchar_t *twend = (wchar_t *) f - 1;
#else
/* Add the character to the flag map. */
wp[fc] = 1;
- if (__builtin_expect (fc == '\0', 0))
+ if (__glibc_unlikely (fc == '\0'))
conv_error();
#endif
{
size_t now = read_in;
#ifdef COMPILE_WSCANF
- if (__builtin_expect (inchar () == WEOF, 0))
+ if (__glibc_unlikely (inchar () == WEOF))
input_error ();
do
size_t cnt = 0;
mbstate_t cstate;
- if (__builtin_expect (inchar () == EOF, 0))
+ if (__glibc_unlikely (inchar () == EOF))
input_error ();
memset (&cstate, '\0', sizeof (cstate));
}
while (inchar () != EOF);
- if (__builtin_expect (cnt != 0, 0))
+ if (__glibc_unlikely (cnt != 0))
/* We stopped in the middle of recognizing another
character. That's a problem. */
encode_error ();
#endif
- if (__builtin_expect (now == read_in, 0))
+ if (__glibc_unlikely (now == read_in))
/* We haven't succesfully read any character. */
conv_error ();
{
size_t now = read_in;
- if (__builtin_expect (inchar () == EOF, 0))
+ if (__glibc_unlikely (inchar () == EOF))
input_error ();
#ifdef COMPILE_WSCANF
}
n = __wcrtomb (!(flags & SUPPRESS) ? str : NULL, c, &state);
- if (__builtin_expect (n == (size_t) -1, 0))
+ if (__glibc_unlikely (n == (size_t) -1))
encode_error ();
assert (n <= MB_CUR_MAX);
while (--width > 0 && inchar () != EOF);
#endif
- if (__builtin_expect (now == read_in, 0))
+ if (__glibc_unlikely (now == read_in))
/* We haven't succesfully read any character. */
conv_error ();
if (errp != NULL)
*errp |= errval;
- if (__builtin_expect (done == EOF, 0))
+ if (__glibc_unlikely (done == EOF))
{
- if (__builtin_expect (ptrs_to_free != NULL, 0))
+ if (__glibc_unlikely (ptrs_to_free != NULL))
{
struct ptrs_to_free *p = ptrs_to_free;
while (p != NULL)
}
}
}
- else if (__builtin_expect (strptr != NULL, 0))
+ else if (__glibc_unlikely (strptr != NULL))
{
free (*strptr);
*strptr = NULL;
/* It is possible that that last exit function registered
more exit functions. Start the loop over. */
- if (__builtin_expect (check != __new_exitfn_called, 0))
+ if (__glibc_unlikely (check != __new_exitfn_called))
goto restart;
}
}
/* See if we already encountered the DSO. */
__rtld_lock_lock_recursive (GL(dl_load_lock));
- if (__builtin_expect (dso_symbol_cache != dso_symbol, 0))
+ if (__glibc_unlikely (dso_symbol_cache != dso_symbol))
{
ElfW(Addr) caller = (ElfW(Addr)) dso_symbol;
uint64_t result;
/* Initialize buffer, if not yet done. */
- if (__builtin_expect (!buffer->__init, 0))
+ if (__glibc_unlikely (!buffer->__init))
{
buffer->__a = 0x5deece66dull;
buffer->__c = 0xb;
#endif
int result = __add_to_environ (name, NULL, string, 1);
- if (__builtin_expect (use_malloc, 0))
+ if (__glibc_unlikely (use_malloc))
free (name);
return result;
# endif
np = KNOWN_VALUE (new_value);
- if (__builtin_expect (np == NULL, 1))
+ if (__glibc_likely (np == NULL))
#endif
{
#ifdef USE_TSEARCH
- if (__builtin_expect (! use_alloca, 0))
+ if (__glibc_unlikely (! use_alloca))
np = new_value;
else
#endif
{
np = malloc (varlen);
- if (__builtin_expect (np == NULL, 0))
+ if (__glibc_unlikely (np == NULL))
{
UNLOCK;
return -1;
struct __locale_data *current = loc->__locales[LC_NUMERIC];
- if (__builtin_expect (group, 0))
+ if (__glibc_unlikely (group))
{
grouping = _NL_CURRENT (LC_NUMERIC, GROUPING);
if (*grouping <= 0 || *grouping == CHAR_MAX)
while (c == L'0' || ((wint_t) thousands != L'\0' && c == (wint_t) thousands))
c = *++cp;
#else
- if (__builtin_expect (thousands == NULL, 1))
+ if (__glibc_likely (thousands == NULL))
while (c == '0')
c = *++cp;
else
/* Not a digit or separator: end of the integer part. */
break;
#else
- if (__builtin_expect (thousands == NULL, 1))
+ if (__glibc_likely (thousands == NULL))
break;
else
{
exponent -= incr;
}
- if (__builtin_expect (exponent > MAX_10_EXP + 1 - (intmax_t) int_no, 0))
+ if (__glibc_unlikely (exponent > MAX_10_EXP + 1 - (intmax_t) int_no))
return overflow_value (negative);
- if (__builtin_expect (exponent < MIN_10_EXP - (DIG + 1), 0))
+ if (__glibc_unlikely (exponent < MIN_10_EXP - (DIG + 1)))
return underflow_value (negative);
if (int_no > 0)
/* Now we know the exponent of the number in base two.
Check it against the maximum possible exponent. */
- if (__builtin_expect (bits > MAX_EXP, 0))
+ if (__glibc_unlikely (bits > MAX_EXP))
return overflow_value (negative);
/* We have already the first BITS bits of the result. Together with
in the format described in <locale.h>. */
const char *grouping;
- if (__builtin_expect (group, 0))
+ if (__glibc_unlikely (group))
{
grouping = _NL_CURRENT (LC_NUMERIC, GROUPING);
if (*grouping <= 0 || *grouping == CHAR_MAX)
/* Skip white space. */
while (ISSPACE (*s))
++s;
- if (__builtin_expect (*s == L_('\0'), 0))
+ if (__glibc_unlikely (*s == L_('\0')))
goto noconv;
/* Check for a sign. */
if (base != 10)
grouping = NULL;
- if (__builtin_expect (grouping != NULL, 0))
+ if (__glibc_unlikely (grouping != NULL))
{
# ifndef USE_WIDE_CHAR
thousands_len = strlen (thousands);
overflow = 1;
#endif
- if (__builtin_expect (overflow, 0))
+ if (__glibc_unlikely (overflow))
{
__set_errno (ERANGE);
#if UNSIGNED
/* Sanity check, otherwise the loop might search through the whole
memory. */
- if (__builtin_expect (haystack_len < needle_len, 0))
+ if (__glibc_unlikely (haystack_len < needle_len))
return NULL;
/* Use optimizations in memchr when possible, to reduce the search
char *ret = __strerror_r (errnum, NULL, 0);
int saved_errno;
- if (__builtin_expect (ret != NULL, 1))
+ if (__glibc_likely (ret != NULL))
return ret;
saved_errno = errno;
if (buf == NULL)
if (maxlen == 0)
return 0;
- if (__builtin_expect (end_ptr < str, 0))
+ if (__glibc_unlikely (end_ptr < str))
end_ptr = (const char *) ~0UL;
/* Handle the first few characters by reading one character at a time.
/* Sanity check, otherwise the loop might search through the whole
memory. */
- if (__builtin_expect (haystack_len < needle_len, 0))
+ if (__glibc_unlikely (haystack_len < needle_len))
return NULL;
for (begin = (const char *) haystack; begin <= last_possible; ++begin)
# endif
}
#endif
- if (__builtin_expect (*sockp < 0, 0))
+ if (__glibc_unlikely (*sockp < 0))
{
struct rpc_createerr *ce = &get_rpc_createerr ();
ce->cf_stat = RPC_SYSTEMERROR;
laddr.sin_addr.s_addr = htonl (INADDR_ANY);
int cc = __bind (so, (struct sockaddr *) &laddr, namelen);
- if (__builtin_expect (cc < 0, 0))
+ if (__glibc_unlikely (cc < 0))
{
fail:
__close (so);
}
cc = __connect (so, (struct sockaddr *) saddr, namelen);
- if (__builtin_expect (cc < 0, 0))
+ if (__glibc_unlikely (cc < 0))
goto fail;
return so;
void
_Unwind_Resume (struct _Unwind_Exception *exc)
{
- if (__builtin_expect (libgcc_s_resume == NULL, 0))
+ if (__glibc_unlikely (libgcc_s_resume == NULL))
init ();
libgcc_s_resume (exc);
}
struct _Unwind_Exception *ue_header,
struct _Unwind_Context *context)
{
- if (__builtin_expect (libgcc_s_personality == NULL, 0))
+ if (__glibc_unlikely (libgcc_s_personality == NULL))
init ();
return libgcc_s_personality (version, actions, exception_class,
ue_header, context);
Elf32_Addr *const reloc_addr = (void *) reloc->r_offset;
const unsigned long int r_type = ELF32_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_386_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_386_IRELATIVE))
{
Elf32_Addr value = elf_ifunc_invoke(*reloc_addr);
*reloc_addr = value;
to intercept the calls to collect information. In this case we
don't store the address in the GOT so that all future calls also
end in this function. */
- if (__builtin_expect (profile, 0))
+ if (__glibc_unlikely (profile))
{
got[2] = (Elf32_Addr) &_dl_runtime_profile;
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
# if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
- if (__builtin_expect (r_type == R_386_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_386_RELATIVE))
{
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
/* This is defined in rtld.c, but nowhere in the static libc.a;
*reloc_addr += map->l_addr;
}
# ifndef RTLD_BOOTSTRAP
- else if (__builtin_expect (r_type == R_386_NONE, 0))
+ else if (__glibc_unlikely (r_type == R_386_NONE))
return;
# endif
else
Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
- if (__builtin_expect (r_type == R_386_JMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_386_JMP_SLOT))
{
if (__builtin_expect (map->l_mach.plt, 0) == 0)
*reloc_addr += l_addr;
*reloc_addr = (map->l_mach.plt
+ (((Elf32_Addr) reloc_addr) - map->l_mach.gotplt) * 4);
}
- else if (__builtin_expect (r_type == R_386_TLS_DESC, 1))
+ else if (__glibc_likely (r_type == R_386_TLS_DESC))
{
struct tlsdesc volatile * __attribute__((__unused__)) td =
(struct tlsdesc volatile *)reloc_addr;
# endif
}
}
- else if (__builtin_expect (r_type == R_386_IRELATIVE, 0))
+ else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
{
Elf32_Addr value = map->l_addr + *reloc_addr;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (void)) value) ();
*reloc_addr = value;
}
{
Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_386_JMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_386_JMP_SLOT))
;
- else if (__builtin_expect (r_type == R_386_TLS_DESC, 1))
+ else if (__glibc_likely (r_type == R_386_TLS_DESC))
{
struct tlsdesc volatile * __attribute__((__unused__)) td =
(struct tlsdesc volatile *)reloc_addr;
td->arg = (void*)reloc;
td->entry = _dl_tlsdesc_resolve_rela;
}
- else if (__builtin_expect (r_type == R_386_IRELATIVE, 0))
+ else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
{
Elf32_Addr value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (void)) value) ();
*reloc_addr = value;
}
double t;
if (isless (xa, 0.5))
{
- if (__builtin_expect (xa < 0x1.0p-28, 0))
+ if (__glibc_unlikely (xa < 0x1.0p-28))
{
math_force_eval (huge + x);
return x;
t = xa + xa;
t = 0.5 * __log1p (t + t * xa / (1.0 - xa));
}
- else if (__builtin_expect (isless (xa, 1.0), 1))
+ else if (__glibc_likely (isless (xa, 1.0)))
t = 0.5 * __log1p ((xa + xa) / (1.0 - xa));
else
{
static const double lomark = (double) (DBL_MIN_EXP - DBL_MANT_DIG - 1);
/* Check for usual case. */
- if (__builtin_expect (isless (x, himark), 1))
+ if (__glibc_likely (isless (x, himark)))
{
/* Exceptional cases: */
- if (__builtin_expect (!isgreaterequal (x, lomark), 0))
+ if (__glibc_unlikely (!isgreaterequal (x, lomark)))
{
if (__isinf (x))
/* e^-inf == 0, with no error. */
}
/* determine ix = ilogb(x) */
- if (__builtin_expect (hx < 0x00100000, 0)) /* subnormal x */
+ if (__glibc_unlikely (hx < 0x00100000)) /* subnormal x */
{
if (hx == 0)
{
ix = (hx >> 20) - 1023;
/* determine iy = ilogb(y) */
- if (__builtin_expect (hy < 0x00100000, 0)) /* subnormal y */
+ if (__glibc_unlikely (hy < 0x00100000)) /* subnormal y */
{
if (hy == 0)
{
iy = (hy >> 20) - 1023;
/* set up {hx,lx}, {hy,ly} and align y to x */
- if (__builtin_expect (ix >= -1022, 1))
+ if (__glibc_likely (ix >= -1022))
hx = 0x00100000 | (0x000fffff & hx);
else /* subnormal x, shift x to normal */
{
lx = 0;
}
}
- if (__builtin_expect (iy >= -1022, 1))
+ if (__glibc_likely (iy >= -1022))
hy = 0x00100000 | (0x000fffff & hy);
else /* subnormal y, shift y to normal */
{
hx = hx + hx + (lx >> 31); lx = lx + lx;
iy -= 1;
}
- if (__builtin_expect (iy >= -1022, 1)) /* normalize output */
+ if (__glibc_likely (iy >= -1022)) /* normalize output */
{
hx = ((hx - 0x00100000) | ((iy + 1023) << 20));
INSERT_WORDS (x, hx | sx, lx);
EXTRACT_WORDS (hx, lx, x);
- if (__builtin_expect (((hx & 0x7fffffff) | lx) == 0, 0))
+ if (__glibc_unlikely (((hx & 0x7fffffff) | lx) == 0))
{
/* Return value for x == 0 is Inf with divide by zero exception. */
*signgamp = 0;
*signgamp = 0;
return (x - x) / (x - x);
}
- if (__builtin_expect ((unsigned int) hx == 0xfff00000 && lx == 0, 0))
+ if (__glibc_unlikely ((unsigned int) hx == 0xfff00000 && lx == 0))
{
/* x == -Inf. According to ISO this is NaN. */
*signgamp = 0;
return x - x;
}
- if (__builtin_expect ((hx & 0x7ff00000) == 0x7ff00000, 0))
+ if (__glibc_unlikely ((hx & 0x7ff00000) == 0x7ff00000))
{
/* Positive infinity (return positive infinity) or NaN (return
NaN). */
return a + b;
} /* x/y > 2**60 */
k = 0;
- if (__builtin_expect (ha > 0x5f300000, 0)) /* a>2**500 */
+ if (__glibc_unlikely (ha > 0x5f300000)) /* a>2**500 */
{
if (ha >= 0x7ff00000) /* Inf or NaN */
{
GET_HIGH_WORD (hx, x);
ix = hx & 0x7fffffff;
- if (__builtin_expect (ix >= 0x7ff00000, 0))
+ if (__glibc_unlikely (ix >= 0x7ff00000))
return one / x;
y = fabs (x);
if (ix >= 0x40000000) /* |x| >= 2.0 */
else
return z;
}
- if (__builtin_expect (ix < 0x3e400000, 0)) /* |x|<2**-27 */
+ if (__glibc_unlikely (ix < 0x3e400000)) /* |x|<2**-27 */
{
if (huge + x > one)
return 0.5 * x; /* inexact if x!=0 necessary */
EXTRACT_WORDS (hx, lx, x);
ix = 0x7fffffff & hx;
/* if Y1(NaN) is NaN, Y1(-inf) is NaN, Y1(inf) is 0 */
- if (__builtin_expect (ix >= 0x7ff00000, 0))
+ if (__glibc_unlikely (ix >= 0x7ff00000))
return one / (x + x * x);
- if (__builtin_expect ((ix | lx) == 0, 0))
+ if (__glibc_unlikely ((ix | lx) == 0))
return -HUGE_VAL + x;
/* -inf and overflow exception. */;
- if (__builtin_expect (hx < 0, 0))
+ if (__glibc_unlikely (hx < 0))
return zero / (zero * x);
if (ix >= 0x40000000) /* |x| >= 2.0 */
{
}
return z;
}
- if (__builtin_expect (ix <= 0x3c900000, 0)) /* x < 2**-54 */
+ if (__glibc_unlikely (ix <= 0x3c900000)) /* x < 2**-54 */
{
return (-tpi / x);
}
EXTRACT_WORDS (hx, lx, x);
ix = 0x7fffffff & hx;
/* if J(n,NaN) is NaN */
- if (__builtin_expect ((ix | ((u_int32_t) (lx | -lx)) >> 31) > 0x7ff00000, 0))
+ if (__glibc_unlikely ((ix | ((u_int32_t) (lx | -lx)) >> 31) > 0x7ff00000))
return x + x;
if (n < 0)
{
return (__ieee754_j1 (x));
sgn = (n & 1) & (hx >> 31); /* even n -- 0, odd n -- sign(x) */
x = fabs (x);
- if (__builtin_expect ((ix | lx) == 0 || ix >= 0x7ff00000, 0))
+ if (__glibc_unlikely ((ix | lx) == 0 || ix >= 0x7ff00000))
/* if x is 0 or inf */
b = zero;
else if ((double) n <= x)
EXTRACT_WORDS (hx, lx, x);
ix = 0x7fffffff & hx;
/* if Y(n,NaN) is NaN */
- if (__builtin_expect ((ix | ((u_int32_t) (lx | -lx)) >> 31) > 0x7ff00000, 0))
+ if (__glibc_unlikely ((ix | ((u_int32_t) (lx | -lx)) >> 31) > 0x7ff00000))
return x + x;
- if (__builtin_expect ((ix | lx) == 0, 0))
+ if (__glibc_unlikely ((ix | lx) == 0))
return -HUGE_VAL + x;
/* -inf and overflow exception. */;
- if (__builtin_expect (hx < 0, 0))
+ if (__glibc_unlikely (hx < 0))
return zero / (zero * x);
sign = 1;
if (n < 0)
return (__ieee754_y0 (x));
if (n == 1)
return (sign * __ieee754_y1 (x));
- if (__builtin_expect (ix == 0x7ff00000, 0))
+ if (__glibc_unlikely (ix == 0x7ff00000))
return zero;
if (ix >= 0x52D00000) /* x > 2**302 */
{ /* (x >> n**2)
ux = num.i[HIGH_HALF];
dx = num.i[LOW_HALF];
n = 0;
- if (__builtin_expect (ux < 0x00100000, 0))
+ if (__glibc_unlikely (ux < 0x00100000))
{
- if (__builtin_expect (((ux & 0x7fffffff) | dx) == 0, 0))
+ if (__glibc_unlikely (((ux & 0x7fffffff) | dx) == 0))
return MHALF / 0.0; /* return -INF */
- if (__builtin_expect (ux < 0, 0))
+ if (__glibc_unlikely (ux < 0))
return (x - x) / 0.0; /* return NaN */
n -= 54;
x *= two54.d; /* scale x */
num.d = x;
}
- if (__builtin_expect (ux >= 0x7ff00000, 0))
+ if (__glibc_unlikely (ux >= 0x7ff00000))
return x + x; /* INF or NaN */
/* Regular values of x */
w = x - 1;
- if (__builtin_expect (ABS (w) > U03, 1))
+ if (__glibc_likely (ABS (w) > U03))
goto case_03;
/*--- Stage I, the case abs(x-1) < 0.03 */
k = 0;
if (hx < 0x00100000)
{ /* x < 2**-1022 */
- if (__builtin_expect (((hx & 0x7fffffff) | lx) == 0, 0))
+ if (__glibc_unlikely (((hx & 0x7fffffff) | lx) == 0))
return -two54 / (x - x); /* log(+-0)=-inf */
- if (__builtin_expect (hx < 0, 0))
+ if (__glibc_unlikely (hx < 0))
return (x - x) / (x - x); /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD (hx, x);
}
- if (__builtin_expect (hx >= 0x7ff00000, 0))
+ if (__glibc_unlikely (hx >= 0x7ff00000))
return x + x;
k += (hx >> 20) - 1023;
i = ((u_int32_t) k & 0x80000000) >> 31;
k = 0;
if (hx < 0x00100000)
{ /* x < 2**-1022 */
- if (__builtin_expect (((hx & 0x7fffffff) | lx) == 0, 0))
+ if (__glibc_unlikely (((hx & 0x7fffffff) | lx) == 0))
return -two54 / (x - x); /* log(+-0)=-inf */
- if (__builtin_expect (hx < 0, 0))
+ if (__glibc_unlikely (hx < 0))
return (x - x) / (x - x); /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD (hx, x);
}
- if (__builtin_expect (hx >= 0x7ff00000, 0))
+ if (__glibc_unlikely (hx >= 0x7ff00000))
return x + x;
k += (hx >> 20) - 1023;
hx &= 0x000fffff;
ix = jx & 0x7fffffff;
/* x is INF or NaN */
- if (__builtin_expect (ix >= 0x7ff00000, 0))
+ if (__glibc_unlikely (ix >= 0x7ff00000))
return x + x;
h = 0.5;
/* |x| in [0,22], return sign(x)*0.5*(E+E/(E+1))) */
if (ix < 0x40360000) /* |x|<22 */
{
- if (__builtin_expect (ix < 0x3e300000, 0)) /* |x|<2**-28 */
+ if (__glibc_unlikely (ix < 0x3e300000)) /* |x|<2**-28 */
if (shuge + x > one)
return x;
/* sinh(tiny) = tiny with inexact */
int32_t hx, ix;
GET_HIGH_WORD (hx, x);
ix = hx & 0x7fffffff;
- if (__builtin_expect (ix < 0x3e300000, 0)) /* |x|<2**-28 */
+ if (__glibc_unlikely (ix < 0x3e300000)) /* |x|<2**-28 */
{
if (huge + x > one)
return x; /* return x inexact except 0 */
}
- if (__builtin_expect (ix > 0x41b00000, 0)) /* |x| > 2**28 */
+ if (__glibc_unlikely (ix > 0x41b00000)) /* |x| > 2**28 */
{
if (ix >= 0x7ff00000)
return x + x; /* x is inf or NaN */
}
/* Ensure correct sign of exact 0 + 0. */
- if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
+ if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
return x * y + z;
fenv_t env;
/* Perform m2 + a2 addition with round to odd. */
u.d = a2 + m2;
- if (__builtin_expect (adjust < 0, 0))
+ if (__glibc_unlikely (adjust < 0))
{
if ((u.ieee.mantissa1 & 1) == 0)
u.ieee.mantissa1 |= libc_fetestexcept (FE_INEXACT) != 0;
/* Reset rounding mode and test for inexact simultaneously. */
int j = libc_feupdateenv_test (&env, FE_INEXACT) != 0;
- if (__builtin_expect (adjust == 0, 1))
+ if (__glibc_likely (adjust == 0))
{
if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
u.ieee.mantissa1 |= j;
/* Result is a1 + u.d. */
return a1 + u.d;
}
- else if (__builtin_expect (adjust > 0, 1))
+ else if (__glibc_likely (adjust > 0))
{
if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
u.ieee.mantissa1 |= j;
k = 1;
if (hx < 0x3FDA827A) /* x < 0.41422 */
{
- if (__builtin_expect (ax >= 0x3ff00000, 0)) /* x <= -1.0 */
+ if (__glibc_unlikely (ax >= 0x3ff00000)) /* x <= -1.0 */
{
if (x == -1.0)
return -two54 / (x - x); /* log1p(-1)=+inf */
else
return (x - x) / (x - x); /* log1p(x<-1)=NaN */
}
- if (__builtin_expect (ax < 0x3e200000, 0)) /* |x| < 2**-29 */
+ if (__glibc_unlikely (ax < 0x3e200000)) /* |x| < 2**-29 */
{
math_force_eval (two54 + x); /* raise inexact */
if (ax < 0x3c900000) /* |x| < 2**-54 */
k = 0; f = x; hu = 1;
} /* -0.2929<x<0.41422 */
}
- else if (__builtin_expect (hx >= 0x7ff00000, 0))
+ else if (__glibc_unlikely (hx >= 0x7ff00000))
return x + x;
if (k != 0)
{
return -1.0 / fabs (x);
if (ix >= 0x7ff00000)
return x * x;
- if (__builtin_expect ((rix = ix >> 20) == 0, 0))
+ if (__glibc_unlikely ((rix = ix >> 20) == 0))
{
/* POSIX specifies that denormal number is treated as
though it were normalized. */
}
}
}
- else if (__builtin_expect (j0 > 51, 0)) /* no fraction part */
+ else if (__glibc_unlikely (j0 > 51)) /* no fraction part */
{
*iptr = x * one;
/* We must handle NaNs separately. */
int32_t k, hx, lx;
EXTRACT_WORDS (hx, lx, x);
k = (hx & 0x7ff00000) >> 20; /* extract exponent */
- if (__builtin_expect (k == 0, 0)) /* 0 or subnormal x */
+ if (__glibc_unlikely (k == 0)) /* 0 or subnormal x */
{
if ((lx | (hx & 0x7fffffff)) == 0)
return x; /* +-0 */
GET_HIGH_WORD (hx, x);
k = ((hx & 0x7ff00000) >> 20) - 54;
}
- if (__builtin_expect (k == 0x7ff, 0))
+ if (__glibc_unlikely (k == 0x7ff))
return x + x; /* NaN or Inf */
- if (__builtin_expect (n < -50000, 0))
+ if (__glibc_unlikely (n < -50000))
return tiny * __copysign (tiny, x); /*underflow*/
- if (__builtin_expect (n > 50000 || k + n > 0x7fe, 0))
+ if (__glibc_unlikely (n > 50000 || k + n > 0x7fe))
return huge * __copysign (huge, x); /* overflow */
/* Now k and n are bounded we know that k = k+n does not
overflow. */
k = k + n;
- if (__builtin_expect (k > 0, 1)) /* normal result */
+ if (__glibc_likely (k > 0)) /* normal result */
{
SET_HIGH_WORD (x, (hx & 0x800fffff) | (k << 20)); return x;
}
int32_t k, hx, lx;
EXTRACT_WORDS (hx, lx, x);
k = (hx & 0x7ff00000) >> 20; /* extract exponent */
- if (__builtin_expect (k == 0, 0)) /* 0 or subnormal x */
+ if (__glibc_unlikely (k == 0)) /* 0 or subnormal x */
{
if ((lx | (hx & 0x7fffffff)) == 0)
return x; /* +-0 */
GET_HIGH_WORD (hx, x);
k = ((hx & 0x7ff00000) >> 20) - 54;
}
- if (__builtin_expect (k == 0x7ff, 0))
+ if (__glibc_unlikely (k == 0x7ff))
return x + x; /* NaN or Inf */
- if (__builtin_expect (n < -50000, 0))
+ if (__glibc_unlikely (n < -50000))
return tiny * __copysign (tiny, x); /*underflow*/
- if (__builtin_expect (n > 50000 || k + n > 0x7fe, 0))
+ if (__glibc_unlikely (n > 50000 || k + n > 0x7fe))
return huge * __copysign (huge, x); /* overflow */
/* Now k and n are bounded we know that k = k+n does not
overflow. */
k = k + n;
- if (__builtin_expect (k > 0, 1)) /* normal result */
+ if (__glibc_likely (k > 0)) /* normal result */
{
SET_HIGH_WORD (x, (hx & 0x800fffff) | (k << 20)); return x;
}
if (hx > INT64_C (0x4000000000000000))
{
- if (__builtin_expect (hx >= INT64_C (0x41b0000000000000), 0))
+ if (__glibc_unlikely (hx >= INT64_C (0x41b0000000000000)))
{
/* x > 2**28 */
if (hx >= INT64_C (0x7ff0000000000000))
double t = x * x;
return __ieee754_log (2.0 * x - one / (x + __ieee754_sqrt (t - one)));
}
- else if (__builtin_expect (hx > INT64_C (0x3ff0000000000000), 1))
+ else if (__glibc_likely (hx > INT64_C (0x3ff0000000000000)))
{
/* 1<x<2 */
double t = x - one;
return __log1p (t + __ieee754_sqrt (2.0 * t + t * t));
}
- else if (__builtin_expect (hx == INT64_C (0x3ff0000000000000), 1))
+ else if (__glibc_likely (hx == INT64_C (0x3ff0000000000000)))
return 0.0; /* acosh(1) = 0 */
else /* x < 1 */
return (x - x) / (x - x);
k = 0;
if (hx < INT64_C(0x0010000000000000))
{ /* x < 2**-1022 */
- if (__builtin_expect ((hx & UINT64_C(0x7fffffffffffffff)) == 0, 0))
+ if (__glibc_unlikely ((hx & UINT64_C(0x7fffffffffffffff)) == 0))
return -two54 / (x - x); /* log(+-0)=-inf */
- if (__builtin_expect (hx < 0, 0))
+ if (__glibc_unlikely (hx < 0))
return (x - x) / (x - x); /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
EXTRACT_WORDS64 (hx, x);
}
/* scale up resulted in a NaN number */
- if (__builtin_expect (hx >= UINT64_C(0x7ff0000000000000), 0))
+ if (__glibc_unlikely (hx >= UINT64_C(0x7ff0000000000000)))
return x + x;
k += (hx >> 52) - 1023;
i = ((uint64_t) k & UINT64_C(0x8000000000000000)) >> 63;
k = 0;
if (hx < INT64_C(0x0010000000000000))
{ /* x < 2**-1022 */
- if (__builtin_expect ((hx & UINT64_C(0x7fffffffffffffff)) == 0, 0))
+ if (__glibc_unlikely ((hx & UINT64_C(0x7fffffffffffffff)) == 0))
return -two54 / (x - x); /* log(+-0)=-inf */
- if (__builtin_expect (hx < 0, 0))
+ if (__glibc_unlikely (hx < 0))
return (x - x) / (x - x); /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
EXTRACT_WORDS64 (hx, x);
}
- if (__builtin_expect (hx >= UINT64_C(0x7ff0000000000000), 0))
+ if (__glibc_unlikely (hx >= UINT64_C(0x7ff0000000000000)))
return x + x;
k += (hx >> 52) - 1023;
hx &= UINT64_C(0x000fffffffffffff);
int32_t ex = 0x7ff & (ix >> 52);
int e = 0;
- if (__builtin_expect (ex != 0x7ff && x != 0.0, 1))
+ if (__glibc_likely (ex != 0x7ff && x != 0.0))
{
/* Not zero and finite. */
e = ex - 1022;
- if (__builtin_expect (ex == 0, 0))
+ if (__glibc_unlikely (ex == 0))
{
/* Subnormal. */
x *= 0x1p54;
ex = ix >> 52;
if (ex == 0x7ff)
return x * x;
- if (__builtin_expect (ex == 0, 0))
+ if (__glibc_unlikely (ex == 0))
{
int m = __builtin_clzll (ix);
ex -= m - 12;
hx &= UINT64_C(0x7fffffffffffffff);
/* Purge off exception values. */
- if (__builtin_expect (hy == 0, 0))
+ if (__glibc_unlikely (hy == 0))
return (x * y) / (x * y); /* y = 0 */
if (__builtin_expect (hx >= UINT64_C(0x7ff0000000000000) /* x not finite */
|| hy > UINT64_C(0x7ff0000000000000), 0))/* y is NaN */
if (hy <= UINT64_C(0x7fbfffffffffffff))
x = __ieee754_fmod (x, 8 * y); /* now x < 8y */
- if (__builtin_expect (hx == hy, 0))
+ if (__glibc_unlikely (hx == hy))
{
*quo = qs ? -1 : 1;
return zero * x;
EXTRACT_WORDS64 (i0, x);
j0 = ((i0 >> 52) & 0x7ff) - 0x3ff;
- if (__builtin_expect (j0 < 52, 1))
+ if (__glibc_likely (j0 < 52))
{
if (j0 < 0)
{
float t;
if (isless (xa, 0.5f))
{
- if (__builtin_expect (xa < 0x1.0p-28f, 0))
+ if (__glibc_unlikely (xa < 0x1.0p-28f))
{
math_force_eval (huge + x);
return x;
t = xa + xa;
t = 0.5f * __log1pf (t + t * xa / (1.0f - xa));
}
- else if (__builtin_expect (isless (xa, 1.0f), 1))
+ else if (__glibc_likely (isless (xa, 1.0f)))
t = 0.5f * __log1pf ((xa + xa) / (1.0f - xa));
else
{
GET_FLOAT_WORD (hx, x);
- if (__builtin_expect ((hx & 0x7fffffff) == 0, 0))
+ if (__glibc_unlikely ((hx & 0x7fffffff) == 0))
{
/* Return value for x == 0 is Inf with divide by zero exception. */
*signgamp = 0;
*signgamp = 0;
return (x - x) / (x - x);
}
- if (__builtin_expect (hx == 0xff800000, 0))
+ if (__glibc_unlikely (hx == 0xff800000))
{
/* x == -Inf. According to ISO this is NaN. */
*signgamp = 0;
return x - x;
}
- if (__builtin_expect ((hx & 0x7f800000) == 0x7f800000, 0))
+ if (__glibc_unlikely ((hx & 0x7f800000) == 0x7f800000))
{
/* Positive infinity (return positive infinity) or NaN (return
NaN). */
return (float) -1.0 / fabsf (x);
if (ix >= 0x7f800000)
return x * x;
- if (__builtin_expect ((rix = ix >> 23) == 0, 0))
+ if (__glibc_unlikely ((rix = ix >> 23) == 0))
{
/* POSIX specifies that denormal number is treated as
though it were normalized. */
}
/* Ensure correct sign of exact 0 + 0. */
- if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
+ if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
return x * y + z;
fenv_t env;
/* Perform m2 + a2 addition with round to odd. */
u.d = a2 + m2;
- if (__builtin_expect (adjust == 0, 1))
+ if (__glibc_likely (adjust == 0))
{
if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
/* Result is a1 + u.d. */
return a1 + u.d;
}
- else if (__builtin_expect (adjust > 0, 1))
+ else if (__glibc_likely (adjust > 0))
{
if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
(hx>=0x7ff0000000000000LL)|| /* y=0,or x not finite */
(hy>0x7ff0000000000000LL),0)) /* or y is NaN */
return (x*y)/(x*y);
- if (__builtin_expect (hx <= hy, 0))
+ if (__glibc_unlikely (hx <= hy))
{
/* If |x| < |y| return x. */
if (hx < hy)
ldbl_extract_mantissa(&hx, &lx, &ix, x);
ldbl_extract_mantissa(&hy, &ly, &iy, y);
- if (__builtin_expect (ix == -IEEE754_DOUBLE_BIAS, 0))
+ if (__glibc_unlikely (ix == -IEEE754_DOUBLE_BIAS))
{
/* subnormal x, shift x to normal. */
while ((hx & (1LL << 48)) == 0)
}
}
- if (__builtin_expect (iy == -IEEE754_DOUBLE_BIAS, 0))
+ if (__glibc_unlikely (iy == -IEEE754_DOUBLE_BIAS))
{
/* subnormal y, shift y to normal. */
while ((hy & (1LL << 48)) == 0)
{
double two52 = 0x1p52;
- if (__builtin_expect ((__builtin_fabs (a) < two52), 1))
+ if (__glibc_likely ((__builtin_fabs (a) < two52)))
{
- if (__builtin_expect ((a > 0.0), 1))
+ if (__glibc_likely ((a > 0.0)))
{
a += two52;
a -= two52;
}
- else if (__builtin_expect ((a < 0.0), 1))
+ else if (__glibc_likely ((a < 0.0)))
{
a = two52 - a;
a = -(a - two52);
{
save_round = __fegetround ();
- if (__builtin_expect ((xh == -(double) (-__LONG_LONG_MAX__ - 1)), 0))
+ if (__glibc_unlikely ((xh == -(double) (-__LONG_LONG_MAX__ - 1))))
{
/* When XH is 9223372036854775808.0, converting to long long will
overflow, resulting in an invalid operation. However, XL might
res = hi + lo;
/* This is just sign(hi) == sign(lo) && sign(res) != sign(hi). */
- if (__builtin_expect (((~(hi ^ lo) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ lo) & (res ^ hi)) < 0)))
goto overflow;
xh -= lo;
break;
}
- if (__builtin_expect (((~(hi ^ (res - hi)) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ (res - hi)) & (res ^ hi)) < 0)))
goto overflow;
return res;
#endif
)
{
- if (__builtin_expect ((xh == -(double) (-__LONG_LONG_MAX__ - 1)), 0))
+ if (__glibc_unlikely ((xh == -(double) (-__LONG_LONG_MAX__ - 1))))
{
/* When XH is 9223372036854775808.0, converting to long long will
overflow, resulting in an invalid operation. However, XL might
res = hi + lo;
/* This is just sign(hi) == sign(lo) && sign(res) != sign(hi). */
- if (__builtin_expect (((~(hi ^ lo) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ lo) & (res ^ hi)) < 0)))
goto overflow;
xh -= lo;
res -= 1;
}
- if (__builtin_expect (((~(hi ^ (res - hi)) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ (res - hi)) & (res ^ hi)) < 0)))
goto overflow;
return res;
return -1.0 / fabs (x);
if (hx >= 0x7ff0000000000000LL)
return x * x;
- if (__builtin_expect ((rhx = hx >> 52) == 0, 0))
+ if (__glibc_unlikely ((rhx = hx >> 52) == 0))
{
/* POSIX specifies that denormal number is treated as
though it were normalized. */
hi = llhi;
xh -= hi;
#else
- if (__builtin_expect ((xh == -(double) (-__LONG_MAX__ - 1)), 0))
+ if (__glibc_unlikely ((xh == -(double) (-__LONG_MAX__ - 1))))
{
/* When XH is 9223372036854775808.0, converting to long long will
overflow, resulting in an invalid operation. However, XL might
res = hi + lo;
/* This is just sign(hi) == sign(lo) && sign(res) != sign(hi). */
- if (__builtin_expect (((~(hi ^ lo) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ lo) & (res ^ hi)) < 0)))
goto overflow;
xh -= lo;
break;
}
- if (__builtin_expect (((~(hi ^ (res - hi)) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ (res - hi)) & (res ^ hi)) < 0)))
goto overflow;
return res;
hi = llhi;
xh -= hi;
#else
- if (__builtin_expect ((xh == -(double) (-__LONG_MAX__ - 1)), 0))
+ if (__glibc_unlikely ((xh == -(double) (-__LONG_MAX__ - 1))))
{
/* When XH is 9223372036854775808.0, converting to long long will
overflow, resulting in an invalid operation. However, XL might
res = hi + lo;
/* This is just sign(hi) == sign(lo) && sign(res) != sign(hi). */
- if (__builtin_expect (((~(hi ^ lo) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ lo) & (res ^ hi)) < 0)))
goto overflow;
xh -= lo;
res -= 1;
}
- if (__builtin_expect (((~(hi ^ (res - hi)) & (res ^ hi)) < 0), 0))
+ if (__glibc_unlikely (((~(hi ^ (res - hi)) & (res ^ hi)) < 0)))
goto overflow;
return res;
GET_LDOUBLE_WORDS (es, hx, lx, x);
- if (__builtin_expect (((es & 0x7fff) | hx | lx) == 0, 0))
+ if (__glibc_unlikely (((es & 0x7fff) | hx | lx) == 0))
{
/* Return value for x == 0 is Inf with divide by zero exception. */
*signgamp = 0;
return 1.0 / x;
}
- if (__builtin_expect (es == 0xffffffff && ((hx & 0x7fffffff) | lx) == 0, 0))
+ if (__glibc_unlikely (es == 0xffffffff && ((hx & 0x7fffffff) | lx) == 0))
{
/* x == -Inf. According to ISO this is NaN. */
*signgamp = 0;
return x - x;
}
- if (__builtin_expect ((es & 0x7fff) == 0x7fff, 0))
+ if (__glibc_unlikely ((es & 0x7fff) == 0x7fff))
{
/* Positive infinity (return positive infinity) or NaN (return
NaN). */
GET_LDOUBLE_EXP (se, x);
ix = se & 0x7fff;
- if (__builtin_expect (ix >= 0x7fff, 0))
+ if (__glibc_unlikely (ix >= 0x7fff))
return one / (x * x);
x = fabsl (x);
if (ix >= 0x4000) /* |x| >= 2.0 */
* j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
* y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
*/
- if (__builtin_expect (ix > 0x4080, 0)) /* 2^129 */
+ if (__glibc_unlikely (ix > 0x4080)) /* 2^129 */
z = (invsqrtpi * cc) / __ieee754_sqrtl (x);
else
{
}
return z;
}
- if (__builtin_expect (ix < 0x3fef, 0)) /* |x| < 2**-16 */
+ if (__glibc_unlikely (ix < 0x3fef)) /* |x| < 2**-16 */
{
/* raise inexact if x != 0 */
math_force_eval (huge + x);
GET_LDOUBLE_WORDS (se, i0, i1, x);
ix = se & 0x7fff;
/* Y0(NaN) is NaN, y0(-inf) is Nan, y0(inf) is 0 */
- if (__builtin_expect (se & 0x8000, 0))
+ if (__glibc_unlikely (se & 0x8000))
return zero / (zero * x);
- if (__builtin_expect (ix >= 0x7fff, 0))
+ if (__glibc_unlikely (ix >= 0x7fff))
return one / (x + x * x);
- if (__builtin_expect ((i0 | i1) == 0, 0))
+ if (__glibc_unlikely ((i0 | i1) == 0))
return -HUGE_VALL + x; /* -inf and overflow exception. */
if (ix >= 0x4000)
{ /* |x| >= 2.0 */
else
ss = z / cc;
}
- if (__builtin_expect (ix > 0x4080, 0)) /* 1e39 */
+ if (__glibc_unlikely (ix > 0x4080)) /* 1e39 */
z = (invsqrtpi * ss) / __ieee754_sqrtl (x);
else
{
}
return z;
}
- if (__builtin_expect (ix <= 0x3fde, 0)) /* x < 2^-33 */
+ if (__glibc_unlikely (ix <= 0x3fde)) /* x < 2^-33 */
{
z = -7.380429510868722527629822444004602747322E-2L
+ tpi * __ieee754_logl (x);
GET_LDOUBLE_EXP (se, x);
ix = se & 0x7fff;
- if (__builtin_expect (ix >= 0x7fff, 0))
+ if (__glibc_unlikely (ix >= 0x7fff))
return one / x;
y = fabsl (x);
if (ix >= 0x4000)
* j1(x) = 1/sqrt(pi) * (P(1,x)*cc - Q(1,x)*ss) / sqrt(x)
* y1(x) = 1/sqrt(pi) * (P(1,x)*ss + Q(1,x)*cc) / sqrt(x)
*/
- if (__builtin_expect (ix > 0x4080, 0))
+ if (__glibc_unlikely (ix > 0x4080))
z = (invsqrtpi * cc) / __ieee754_sqrtl (y);
else
{
else
return z;
}
- if (__builtin_expect (ix < 0x3fde, 0)) /* |x| < 2^-33 */
+ if (__glibc_unlikely (ix < 0x3fde)) /* |x| < 2^-33 */
{
if (huge + x > one)
return 0.5 * x; /* inexact if x!=0 necessary */
GET_LDOUBLE_WORDS (se, i0, i1, x);
ix = se & 0x7fff;
/* if Y1(NaN) is NaN, Y1(-inf) is NaN, Y1(inf) is 0 */
- if (__builtin_expect (se & 0x8000, 0))
+ if (__glibc_unlikely (se & 0x8000))
return zero / (zero * x);
- if (__builtin_expect (ix >= 0x7fff, 0))
+ if (__glibc_unlikely (ix >= 0x7fff))
return one / (x + x * x);
- if (__builtin_expect ((i0 | i1) == 0, 0))
+ if (__glibc_unlikely ((i0 | i1) == 0))
return -HUGE_VALL + x; /* -inf and overflow exception. */
if (ix >= 0x4000)
{ /* |x| >= 2.0 */
* sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
* to compute the worse one.
*/
- if (__builtin_expect (ix > 0x4080, 0))
+ if (__glibc_unlikely (ix > 0x4080))
z = (invsqrtpi * ss) / __ieee754_sqrtl (x);
else
{
}
return z;
}
- if (__builtin_expect (ix <= 0x3fbe, 0))
+ if (__glibc_unlikely (ix <= 0x3fbe))
{ /* x < 2**-65 */
return (-tpi / x);
}
ix = se & 0x7fff;
/* if J(n,NaN) is NaN */
- if (__builtin_expect ((ix == 0x7fff) && ((i0 & 0x7fffffff) != 0), 0))
+ if (__glibc_unlikely ((ix == 0x7fff) && ((i0 & 0x7fffffff) != 0)))
return x + x;
if (n < 0)
{
return (__ieee754_j1l (x));
sgn = (n & 1) & (se >> 15); /* even n -- 0, odd n -- sign(x) */
x = fabsl (x);
- if (__builtin_expect ((ix | i0 | i1) == 0 || ix >= 0x7fff, 0))
+ if (__glibc_unlikely ((ix | i0 | i1) == 0 || ix >= 0x7fff))
/* if x is 0 or inf */
b = zero;
else if ((long double) n <= x)
return (__ieee754_y0l (x));
if (n == 1)
return (sign * __ieee754_y1l (x));
- if (__builtin_expect (ix == 0x7fff, 0))
+ if (__glibc_unlikely (ix == 0x7fff))
return zero;
if (ix >= 0x412D)
{ /* x > 2**302 */
double
__fma (double x, double y, double z)
{
- if (__builtin_expect (isinf (z), 0))
+ if (__glibc_unlikely (isinf (z)))
{
/* If z is Inf, but x and y are finite, the result should be
z rather than NaN. */
}
/* Ensure correct sign of exact 0 + 0. */
- if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
+ if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
return x * y + z;
fenv_t env;
}
/* Ensure correct sign of exact 0 + 0. */
- if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
+ if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
return x * y + z;
fenv_t env;
/* Perform m2 + a2 addition with round to odd. */
u.d = a2 + m2;
- if (__builtin_expect (adjust == 0, 1))
+ if (__glibc_likely (adjust == 0))
{
if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mantissa1 |= fetestexcept (FE_INEXACT) != 0;
/* Result is a1 + u.d. */
return a1 + u.d;
}
- else if (__builtin_expect (adjust > 0, 1))
+ else if (__glibc_likely (adjust > 0))
{
if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mantissa1 |= fetestexcept (FE_INEXACT) != 0;
static int
hp_timing_getres (struct timespec *res)
{
- if (__builtin_expect (nsec == 0, 0))
+ if (__glibc_unlikely (nsec == 0))
{
hp_timing_t freq;
code against multiple execution since all of them should
lead to the same result. */
freq = __get_clockfreq ();
- if (__builtin_expect (freq == 0, 0))
+ if (__glibc_unlikely (freq == 0))
/* Something went wrong. */
return -1;
{
long int clk_tck = sysconf (_SC_CLK_TCK);
- if (__builtin_expect (clk_tck != -1, 1))
+ if (__glibc_likely (clk_tck != -1))
{
/* This implementation assumes that the realtime clock has a
resolution higher than 1 second. This is the case for any
if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0)
return NULL;
- if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
+ if (__glibc_unlikely (! S_ISDIR (statbuf.st_mode)))
{
__set_errno (ENOTDIR);
return NULL;
/* Make sure the descriptor allows for reading. */
int flags = __fcntl (fd, F_GETFL);
- if (__builtin_expect (flags == -1, 0))
+ if (__glibc_unlikely (flags == -1))
return NULL;
- if (__builtin_expect ((flags & O_ACCMODE) == O_WRONLY, 0))
+ if (__glibc_unlikely ((flags & O_ACCMODE) == O_WRONLY))
{
__set_errno (EINVAL);
return NULL;
bool malloc_namebuf = false;
char *namebuf = (char *) name;
- if (__builtin_expect (scope_delim != NULL, 0))
+ if (__glibc_unlikely (scope_delim != NULL))
{
if (malloc_name)
*scope_delim = '\0';
the information. */
struct sort_result_combo src
= { .results = results, .nresults = nresults };
- if (__builtin_expect (gaiconf_reload_flag_ever_set, 0))
+ if (__glibc_unlikely (gaiconf_reload_flag_ever_set))
{
__libc_lock_define_initialized (static, lock);
performed on, say, a tape device might have undesirable effects. */
if (__builtin_expect (__xstat64 (_STAT_VER, name, &statbuf), 0) < 0)
return NULL;
- if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
+ if (__glibc_unlikely (! S_ISDIR (statbuf.st_mode)))
{
__set_errno (ENOTDIR);
return NULL;
the `stat' call. */
if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0)
goto lose;
- if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
+ if (__glibc_unlikely (! S_ISDIR (statbuf.st_mode)))
{
__set_errno (ENOTDIR);
lose:
__sleep (unsigned int seconds)
{
/* This is not necessary but some buggy programs depend on it. */
- if (__builtin_expect (seconds == 0, 0))
+ if (__glibc_unlikely (seconds == 0))
{
#ifdef CANCELLATION_P
CANCELLATION_P (THREAD_SELF);
# include <hp-timing.h>
# if HP_TIMING_AVAIL
# define RANDOM_BITS(Var) \
- if (__builtin_expect (value == UINT64_C (0), 0)) \
+ if (__glibc_unlikely (value == UINT64_C (0))) \
{ \
/* If this is the first time this function is used initialize \
the variable we accumulate the value in to some somewhat \
{
unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_PPC_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_PPC_IRELATIVE))
{
Elf32_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf32_Addr value = elf_ifunc_invoke(reloc->r_addend);
break;
case R_PPC_ADDR24:
- if (__builtin_expect (finaladdr > 0x01fffffc && finaladdr < 0xfe000000, 0))
+ if (__glibc_unlikely (finaladdr > 0x01fffffc && finaladdr < 0xfe000000))
_dl_reloc_overflow (map, "R_PPC_ADDR24", reloc_addr, refsym);
*reloc_addr = (*reloc_addr & 0xfc000003) | (finaladdr & 0x3fffffc);
break;
case R_PPC_ADDR16:
- if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
+ if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
_dl_reloc_overflow (map, "R_PPC_ADDR16", reloc_addr, refsym);
*(Elf32_Half*) reloc_addr = finaladdr;
break;
case R_PPC_UADDR16:
- if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
+ if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
_dl_reloc_overflow (map, "R_PPC_UADDR16", reloc_addr, refsym);
((union unaligned *) reloc_addr)->u2 = finaladdr;
break;
case R_PPC_ADDR14:
case R_PPC_ADDR14_BRTAKEN:
case R_PPC_ADDR14_BRNTAKEN:
- if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
+ if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
_dl_reloc_overflow (map, "R_PPC_ADDR14", reloc_addr, refsym);
*reloc_addr = (*reloc_addr & 0xffff0003) | (finaladdr & 0xfffc);
if (rinfo != R_PPC_ADDR14)
inline void do_reloc16 (const char *r_name, Elf32_Addr value)
{
- if (__builtin_expect (value > 0x7fff && value < 0xffff8000, 0))
+ if (__glibc_unlikely (value > 0x7fff && value < 0xffff8000))
_dl_reloc_overflow (map, r_name, reloc_addr, refsym);
*(Elf32_Half *) reloc_addr = value;
}
extern void _dl_runtime_resolve (void);
extern void _dl_prof_resolve (void);
- if (__builtin_expect (!profile, 1))
+ if (__glibc_likely (!profile))
dlrr = _dl_runtime_resolve;
else
{
return;
}
- if (__builtin_expect (r_type == R_PPC_NONE, 0))
+ if (__glibc_unlikely (r_type == R_PPC_NONE))
return;
/* binutils on ppc32 includes st_value in r_addend for relocations
{
unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_PPC64_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_PPC64_IRELATIVE))
{
Elf64_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf64_Addr value = elf_ifunc_invoke(reloc->r_addend);
*reloc_addr = value;
}
- else if (__builtin_expect (r_type == R_PPC64_JMP_IREL, 1))
+ else if (__glibc_likely (r_type == R_PPC64_JMP_IREL))
{
Elf64_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf64_Addr value = elf_ifunc_invoke(reloc->r_addend);
return;
}
- if (__builtin_expect (r_type == R_PPC64_NONE, 0))
+ if (__glibc_unlikely (r_type == R_PPC64_NONE))
return;
/* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
return;
case R_PPC64_IRELATIVE:
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = resolve_ifunc (value, map, sym_map);
*reloc_addr = value;
return;
case R_PPC64_JMP_IREL:
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = resolve_ifunc (value, map, sym_map);
/* Fall thru */
case R_PPC64_JMP_SLOT:
int result = 0;
/* Send the signal to notify about finished processing of the request. */
- if (__builtin_expect (sigev->sigev_notify == SIGEV_THREAD, 0))
+ if (__glibc_unlikely (sigev->sigev_notify == SIGEV_THREAD))
{
/* We have to start a thread. */
pthread_t tid;
int nent;
const struct timespec *timeout;
{
- if (__builtin_expect (nent < 0, 0))
+ if (__glibc_unlikely (nent < 0))
{
__set_errno (EINVAL);
return -1;
#ifndef DONT_NEED_AIO_MISC_COND
/* Release the conditional variable. */
- if (__builtin_expect (pthread_cond_destroy (&cond) != 0, 0))
+ if (__glibc_unlikely (pthread_cond_destroy (&cond) != 0))
/* This must never happen. */
abort ();
#endif
ElfW(Addr) *const reloc_addr = (void *) reloc->r_offset;
const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);
- if (__builtin_expect (r_type == R_390_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_390_IRELATIVE))
{
ElfW(Addr) value = elf_ifunc_invoke(reloc->r_addend);
*reloc_addr = value;
to intercept the calls to collect information. In this case we
don't store the address in the GOT so that all future calls also
end in this function. */
- if (__builtin_expect (profile, 0))
+ if (__glibc_unlikely (profile))
{
got[2] = (Elf32_Addr) &_dl_runtime_profile;
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
- if (__builtin_expect (r_type == R_390_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_390_RELATIVE))
{
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
/* This is defined in rtld.c, but nowhere in the static libc.a;
}
else
#endif
- if (__builtin_expect (r_type == R_390_NONE, 0))
+ if (__glibc_unlikely (r_type == R_390_NONE))
return;
else
{
{
case R_390_IRELATIVE:
value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
break;
Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
- if (__builtin_expect (r_type == R_390_JMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_390_JMP_SLOT))
{
if (__builtin_expect (map->l_mach.plt, 0) == 0)
*reloc_addr += l_addr;
map->l_mach.plt
+ (((Elf32_Addr) reloc_addr) - map->l_mach.gotplt) * 8;
}
- else if (__builtin_expect (r_type == R_390_IRELATIVE, 1))
+ else if (__glibc_likely (r_type == R_390_IRELATIVE))
{
Elf32_Addr value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
}
to intercept the calls to collect information. In this case we
don't store the address in the GOT so that all future calls also
end in this function. */
- if (__builtin_expect (profile, 0))
+ if (__glibc_unlikely (profile))
{
got[2] = (Elf64_Addr) &_dl_runtime_profile;
const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
- if (__builtin_expect (r_type == R_390_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_390_RELATIVE))
{
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
/* This is defined in rtld.c, but nowhere in the static libc.a;
}
else
#endif
- if (__builtin_expect (r_type == R_390_NONE, 0))
+ if (__glibc_unlikely (r_type == R_390_NONE))
return;
else
{
{
case R_390_IRELATIVE:
value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
break;
Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
- if (__builtin_expect (r_type == R_390_JMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_390_JMP_SLOT))
{
if (__builtin_expect (map->l_mach.plt, 0) == 0)
*reloc_addr += l_addr;
map->l_mach.plt
+ (((Elf64_Addr) reloc_addr) - map->l_mach.gotplt) * 4;
}
- else if (__builtin_expect (r_type == R_390_IRELATIVE, 1))
+ else if (__glibc_likely (r_type == R_390_IRELATIVE))
{
Elf64_Addr value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
*reloc_addr = value;
}
if (dir == to_utf16) \
{ \
/* Emit the UTF-16 Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 2 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 2 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put16u (outbuf, BOM_UTF16); \
else \
{ \
/* Emit the UTF-32 Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 4 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 4 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put32u (outbuf, BOM_UTF32); \
{ \
/* An isolated low-surrogate was found. This has to be \
considered ill-formed. */ \
- if (__builtin_expect (u1 >= 0xdc00, 0)) \
+ if (__glibc_unlikely (u1 >= 0xdc00)) \
{ \
STANDARD_FROM_LOOP_ERR_HANDLER (2); \
} \
/* It's a surrogate character. At least the first word says \
it is. */ \
- if (__builtin_expect (inptr + 4 > inend, 0)) \
+ if (__glibc_unlikely (inptr + 4 > inend)) \
{ \
/* We don't have enough input for another complete input \
character. */ \
uint16_t out; \
\
/* Generate a surrogate character. */ \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
&& data->__invocation_counter == 0) \
{ \
/* Emit the UTF-16 Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 2 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 2 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put16u (outbuf, BOM_UTF16); \
if ((inptr[i] & 0xc0) != 0x80) \
break; \
\
- if (__builtin_expect (inptr + i == inend, 1)) \
+ if (__glibc_likely (inptr + i == inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
/* Next input byte. */ \
uint16_t ch = *inptr; \
\
- if (__builtin_expect (ch < 0x80, 1)) \
+ if (__glibc_likely (ch < 0x80)) \
{ \
/* One byte sequence. */ \
++inptr; \
cnt = 2; \
ch &= 0x1f; \
} \
- else if (__builtin_expect ((ch & 0xf0) == 0xe0, 1)) \
+ else if (__glibc_likely ((ch & 0xf0) == 0xe0)) \
{ \
/* We expect three bytes. */ \
cnt = 3; \
ch &= 0x0f; \
} \
- else if (__builtin_expect ((ch & 0xf8) == 0xf0, 1)) \
+ else if (__glibc_likely ((ch & 0xf8) == 0xf0)) \
{ \
/* We expect four bytes. */ \
cnt = 4; \
STANDARD_FROM_LOOP_ERR_HANDLER (i); \
} \
\
- if (__builtin_expect (inptr + cnt > inend, 0)) \
+ if (__glibc_unlikely (inptr + cnt > inend)) \
{ \
/* We don't have enough input. But before we report \
that check that all the bytes are correct. */ \
if ((inptr[i] & 0xc0) != 0x80) \
break; \
\
- if (__builtin_expect (inptr + i == inend, 1)) \
+ if (__glibc_likely (inptr + i == inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
low) are needed. */ \
uint16_t zabcd, high, low; \
\
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
\
uint16_t c = get16 (inptr); \
\
- if (__builtin_expect (c <= 0x007f, 1)) \
+ if (__glibc_likely (c <= 0x007f)) \
{ \
/* Single byte UTF-8 char. */ \
*outptr = c & 0xff; \
{ \
/* Two byte UTF-8 char. */ \
\
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
{ \
/* Three byte UTF-8 char. */ \
\
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
/* Four byte UTF-8 char. */ \
uint16_t low, uvwxy; \
\
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
break; \
} \
inptr += 2; \
- if (__builtin_expect (inptr + 2 > inend, 0)) \
+ if (__glibc_unlikely (inptr + 2 > inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
&& data->__invocation_counter == 0) \
{ \
/* Emit the Byte Order Mark. */ \
- if (__builtin_expect (outbuf + 4 > outend, 0)) \
+ if (__glibc_unlikely (outbuf + 4 > outend)) \
return __GCONV_FULL_OUTPUT; \
\
put32u (outbuf, BOM); \
if ((inptr[i] & 0xc0) != 0x80) \
break; \
\
- if (__builtin_expect (inptr + i == inend, 1)) \
+ if (__glibc_likely (inptr + i == inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
/* Next input byte. */ \
uint32_t ch = *inptr; \
\
- if (__builtin_expect (ch < 0x80, 1)) \
+ if (__glibc_likely (ch < 0x80)) \
{ \
/* One byte sequence. */ \
++inptr; \
cnt = 2; \
ch &= 0x1f; \
} \
- else if (__builtin_expect ((ch & 0xf0) == 0xe0, 1)) \
+ else if (__glibc_likely ((ch & 0xf0) == 0xe0)) \
{ \
/* We expect three bytes. */ \
cnt = 3; \
ch &= 0x0f; \
} \
- else if (__builtin_expect ((ch & 0xf8) == 0xf0, 1)) \
+ else if (__glibc_likely ((ch & 0xf8) == 0xf0)) \
{ \
/* We expect four bytes. */ \
cnt = 4; \
ch &= 0x07; \
} \
- else if (__builtin_expect ((ch & 0xfc) == 0xf8, 1)) \
+ else if (__glibc_likely ((ch & 0xfc) == 0xf8)) \
{ \
/* We expect five bytes. */ \
cnt = 5; \
ch &= 0x03; \
} \
- else if (__builtin_expect ((ch & 0xfe) == 0xfc, 1)) \
+ else if (__glibc_likely ((ch & 0xfe) == 0xfc)) \
{ \
/* We expect six bytes. */ \
cnt = 6; \
STANDARD_FROM_LOOP_ERR_HANDLER (i); \
} \
\
- if (__builtin_expect (inptr + cnt > inend, 0)) \
+ if (__glibc_unlikely (inptr + cnt > inend)) \
{ \
/* We don't have enough input. But before we report \
that check that all the bytes are correct. */ \
if ((inptr[i] & 0xc0) != 0x80) \
break; \
\
- if (__builtin_expect (inptr + i == inend, 1)) \
+ if (__glibc_likely (inptr + i == inend)) \
{ \
result = __GCONV_INCOMPLETE_INPUT; \
break; \
cnt = 2; \
ch &= 0x1f; \
} \
- else if (__builtin_expect ((ch & 0xf0) == 0xe0, 1)) \
+ else if (__glibc_likely ((ch & 0xf0) == 0xe0)) \
{ \
/* We expect three bytes. */ \
cnt = 3; \
ch &= 0x0f; \
} \
- else if (__builtin_expect ((ch & 0xf8) == 0xf0, 1)) \
+ else if (__glibc_likely ((ch & 0xf8) == 0xf0)) \
{ \
/* We expect four bytes. */ \
cnt = 4; \
ch &= 0x07; \
} \
- else if (__builtin_expect ((ch & 0xfc) == 0xf8, 1)) \
+ else if (__glibc_likely ((ch & 0xfc) == 0xf8)) \
{ \
/* We expect five bytes. */ \
cnt = 5; \
\
uint32_t wc = *((const uint32_t *) inptr); \
\
- if (__builtin_expect (wc <= 0x7f, 1)) \
+ if (__glibc_likely (wc <= 0x7f)) \
{ \
/* Single UTF-8 char. */ \
*outptr = (uint8_t)wc; \
else if (wc <= 0x7ff) \
{ \
/* Two UTF-8 chars. */ \
- if (__builtin_expect (outptr + 2 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 2 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
else if (wc <= 0xffff) \
{ \
/* Three UTF-8 chars. */ \
- if (__builtin_expect (outptr + 3 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 3 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
else if (wc <= 0x10ffff) \
{ \
/* Four UTF-8 chars. */ \
- if (__builtin_expect (outptr + 4 > outend, 0)) \
+ if (__glibc_unlikely (outptr + 4 > outend)) \
{ \
/* Overflow in the output buffer. */ \
result = __GCONV_FULL_OUTPUT; \
} \
}
- if (__builtin_expect (r_type == R_SH_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_SH_RELATIVE))
{
#ifndef RTLD_BOOTSTRAP
if (map != &GL(dl_rtld_map)) /* Already done in rtld itself. */
}
}
#ifndef RTLD_BOOTSTRAP
- else if (__builtin_expect (r_type == R_SH_NONE, 0))
+ else if (__glibc_unlikely (r_type == R_SH_NONE))
return;
#endif
else
{
unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_SPARC_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_SPARC_IRELATIVE))
{
Elf32_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf32_Addr value = elf_ifunc_invoke(reloc->r_addend);
*reloc_addr = value;
}
- else if (__builtin_expect (r_type == R_SPARC_JMP_IREL, 1))
+ else if (__glibc_likely (r_type == R_SPARC_JMP_IREL))
{
Elf32_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf32_Addr value = elf_ifunc_invoke(reloc->r_addend);
weak_extern (_dl_rtld_map);
#endif
- if (__builtin_expect (r_type == R_SPARC_NONE, 0))
+ if (__glibc_unlikely (r_type == R_SPARC_NONE))
return;
- if (__builtin_expect (r_type == R_SPARC_SIZE32, 0))
+ if (__glibc_unlikely (r_type == R_SPARC_SIZE32))
{
*reloc_addr = sym->st_size + reloc->r_addend;
return;
}
#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
- if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
{
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
if (map != &_dl_rtld_map) /* Already done in rtld itself. */
Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_SPARC_JMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_SPARC_JMP_SLOT))
;
else if (r_type == R_SPARC_JMP_IREL)
{
Elf32_Addr value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
sparc_fixup_plt (reloc, reloc_addr, value, 1, 1);
}
{
unsigned int r_type = (reloc->r_info & 0xff);
- if (__builtin_expect (r_type == R_SPARC_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_SPARC_IRELATIVE))
{
Elf64_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf64_Addr value = elf_ifunc_invoke(reloc->r_addend);
*reloc_addr = value;
}
- else if (__builtin_expect (r_type == R_SPARC_JMP_IREL, 1))
+ else if (__glibc_likely (r_type == R_SPARC_JMP_IREL))
{
Elf64_Addr *const reloc_addr = (void *) reloc->r_offset;
Elf64_Addr value = elf_ifunc_invoke(reloc->r_addend);
weak_extern (_dl_rtld_map);
#endif
- if (__builtin_expect (r_type == R_SPARC_NONE, 0))
+ if (__glibc_unlikely (r_type == R_SPARC_NONE))
return;
- if (__builtin_expect (r_type == R_SPARC_SIZE64, 0))
+ if (__glibc_unlikely (r_type == R_SPARC_SIZE64))
{
*reloc_addr = sym->st_size + reloc->r_addend;
return;
}
#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
- if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
{
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
if (map != &_dl_rtld_map) /* Already done in rtld itself. */
Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
- if (__builtin_expect (r_type == R_SPARC_JMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_SPARC_JMP_SLOT))
;
else if (r_type == R_SPARC_JMP_IREL
|| r_type == R_SPARC_IRELATIVE)
{
Elf64_Addr value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
if (r_type == R_SPARC_JMP_IREL)
{
/* ??? Some tricks can be stolen from the sparc64 egcs backend
constant formation code I wrote. -DaveM */
- if (__builtin_expect (high32 & 0x3ff, 0))
+ if (__glibc_unlikely (high32 & 0x3ff))
{
/* sethi %hh(value), %g1
sethi %lm(value), %g5
{
hp_timing_t tsc;
- if (__builtin_expect (freq == 0, 0))
+ if (__glibc_unlikely (freq == 0))
{
/* This can only happen if we haven't initialized the `freq'
variable yet. Do this now. We don't have to protect this
code against multiple execution since all of them should
lead to the same result. */
freq = __get_clockfreq ();
- if (__builtin_expect (freq == 0, 0))
+ if (__glibc_unlikely (freq == 0))
/* Something went wrong. */
return -1;
}
/* First thing is to get the current time. */
HP_TIMING_NOW (tsc);
- if (__builtin_expect (freq == 0, 0))
+ if (__glibc_unlikely (freq == 0))
{
/* This can only happen if we haven't initialized the `freq'
variable yet. Do this now. We don't have to protect this
code against multiple execution since all of them should lead
to the same result. */
freq = __get_clockfreq ();
- if (__builtin_expect (freq == 0, 0))
+ if (__glibc_unlikely (freq == 0))
/* Something went wrong. */
return -1;
}
char *buf = _buf;
struct stat64 st;
- if (__builtin_expect (pts_name (fd, &buf, sizeof (_buf), &st), 0))
+ if (__glibc_unlikely (pts_name (fd, &buf, sizeof (_buf), &st)))
{
int save_errno = errno;
}
static int tty_gid = -1;
- if (__builtin_expect (tty_gid == -1, 0))
+ if (__glibc_unlikely (tty_gid == -1))
{
char *grtmpbuf;
struct group grbuf;
int
accept4 (int fd, __SOCKADDR_ARG addr, socklen_t *addr_len, int flags)
{
- if (__builtin_expect (have_accept4 >= 0, 1))
+ if (__glibc_likely (have_accept4 >= 0))
{
int ret = __internal_accept4 (fd, addr, addr_len, flags);
/* The kernel returns -EINVAL for unknown socket operations.
#if defined ADJ_OFFSET_SS_READ && !defined __ASSUME_ADJ_OFFSET_SS_READ
again:
#endif
- if (__builtin_expect (ADJTIMEX (&tntx) < 0, 0))
+ if (__glibc_unlikely (ADJTIMEX (&tntx) < 0))
{
#if defined ADJ_OFFSET_SS_READ && !defined __ASSUME_ADJ_OFFSET_SS_READ
if (itv && errno == EINVAL && tntx.modes == ADJ_OFFSET_SS_READ)
{
int fd = __socket (PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
- if (__builtin_expect (fd >= 0, 1))
+ if (__glibc_likely (fd >= 0))
{
struct sockaddr_nl nladdr;
memset (&nladdr, '\0', sizeof (nladdr));
if the library is not compiled to run on all kernels. */ \
\
int version = _dl_discover_osversion (); \
- if (__builtin_expect (version >= 0, 1)) \
+ if (__glibc_likely (version >= 0)) \
{ \
if (__builtin_expect (GLRO(dl_osversion) == 0, 1) \
|| GLRO(dl_osversion) > version) \
} ret;
#ifndef __ASSUME_AT_RANDOM
- if (__builtin_expect (dl_random == NULL, 0))
+ if (__glibc_unlikely (dl_random == NULL))
{
const size_t filllen = sizeof (ret.bytes) - 1;
ret.num = 0;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
# endif
result = INTERNAL_SYSCALL (access, err, 2, file, mode);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
result = -1;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
# endif
result = INTERNAL_SYSCALL (chmod, err, 2, file, mode);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
result = -1;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
else
result = INTERNAL_SYSCALL (chown, err, 3, file, owner, group);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
result = -1;
else if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
# ifdef __NR_utimes
result = INTERNAL_SYSCALL (utimes, err, 2, file, tvp);
- if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+ if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
return result;
# ifndef __ASSUME_UTIMES
times = NULL;
result = INTERNAL_SYSCALL (utime, err, 2, file, times);
- if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+ if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
return result;
fail:
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (stat, err, 2, file,
(struct kernel_stat *) st);
- if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+ if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
return result;
}
#ifdef STAT_IS_KERNEL_STAT
else
result = INTERNAL_SYSCALL (stat, err, 2, file, &kst);
- if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+ if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
return __xstat_conv (vers, &kst, st);
#endif
int
__fxstatat64 (int vers, int fd, const char *file, struct stat64 *st, int flag)
{
- if (__builtin_expect (vers != _STAT_VER_LINUX, 0))
+ if (__glibc_unlikely (vers != _STAT_VER_LINUX))
{
__set_errno (EINVAL);
return -1;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (lstat64, err, 2, file, st);
else
result = INTERNAL_SYSCALL (stat64, err, 2, file, st);
- if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+ if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
{
# if defined _HAVE_STAT64___ST_INO && __ASSUME_ST_INO_64_BIT == 0
if (st->__st_ino != (__ino_t) st->st_ino)
LIBC_CANCEL_RESET (oldtype);
}
- if (__builtin_expect (err, 0))
+ if (__glibc_unlikely (err))
{
__set_errno (err);
err = -1;
LIBC_CANCEL_RESET (oldtype);
}
- if (__builtin_expect (err, 0))
+ if (__glibc_unlikely (err))
{
__set_errno (err);
err = -1;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
else
result = INTERNAL_SYSCALL (chown32, err, 3, file, owner, group);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
return -1;
#endif
#ifndef __ASSUME_ATFCTS
- if (__builtin_expect (flag & ~AT_SYMLINK_NOFOLLOW, 0))
+ if (__glibc_unlikely (flag & ~AT_SYMLINK_NOFOLLOW))
{
__set_errno (EINVAL);
return -1;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (lstat64, err, 2, file, &st64);
else
result = INTERNAL_SYSCALL (stat64, err, 2, file, &st64);
- if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
+ if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
return __xstat32_conv (vers, &st64, st);
out:
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
result = -1;
return result;
fd = __open ("/proc/cpuinfo", O_RDONLY);
- if (__builtin_expect (fd != -1, 1))
+ if (__glibc_likely (fd != -1))
{
/* XXX AFAIK the /proc filesystem can generate "files" only up
to a size of 4096 bytes. */
{
char *mhz = memmem (buf, n, "cpu MHz", 7);
- if (__builtin_expect (mhz != NULL, 1))
+ if (__glibc_likely (mhz != NULL))
{
char *endp = buf + n;
int seen_decpoint = 0;
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (__have_fallocate >= 0, 1))
+ if (__glibc_likely (__have_fallocate >= 0))
# endif
{
int res = __call_fallocate (fd, 0, offset, len);
return 0;
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (res == ENOSYS, 0))
+ if (__glibc_unlikely (res == ENOSYS))
__have_fallocate = -1;
else
# endif
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (__have_fallocate >= 0, 1))
+ if (__glibc_likely (__have_fallocate >= 0))
# endif
{
int res = __call_fallocate (fd, 0, offset, len);
return 0;
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (res == ENOSYS, 0))
+ if (__glibc_unlikely (res == ENOSYS))
__have_fallocate = -1;
else
# endif
/* Ignore errors from select or readdir */
__set_errno (0);
- if (__builtin_expect (c.cnt == vsize, 0))
+ if (__glibc_unlikely (c.cnt == vsize))
{
struct __old_dirent64 **new;
if (vsize == 0)
#define INLINE_SYSCALL(name, nr, args...) \
({ \
unsigned int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (resultvar, ), 0)) \
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
resultvar = 0xffffffff; \
if (nladdr.nl_pid != 0)
continue;
- if (__builtin_expect (msg.msg_flags & MSG_TRUNC, 0))
+ if (__glibc_unlikely (msg.msg_flags & MSG_TRUNC))
goto out_fail;
size_t count = 0;
kernel. */
ifa_index = map_newlink (ifim->ifi_index - 1, ifas,
map_newlink_data, newlink);
- if (__builtin_expect (ifa_index == -1, 0))
+ if (__glibc_unlikely (ifa_index == -1))
{
try_again:
result = -EAGAIN;
ifa_index = newlink + newaddr_idx;
int idx = map_newlink (ifam->ifa_index - 1, ifas,
map_newlink_data, newlink);
- if (__builtin_expect (idx == -1, 0))
+ if (__glibc_unlikely (idx == -1))
goto try_again;
ifas[ifa_index].ifa.ifa_flags = ifas[idx].ifa.ifa_flags;
if (ifa_index > 0)
{
int idx = map_newlink (ifam->ifa_index - 1, ifas,
map_newlink_data, newlink);
- if (__builtin_expect (idx == -1, 0))
+ if (__glibc_unlikely (idx == -1))
goto try_again;
ifas[ifa_index].ifa.ifa_name = ifas[idx].ifa.ifa_name;
}
if (fromfd != AT_FDCWD && from[0] != '/')
{
size_t filelen = strlen (from);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (link, err, 2, from, to);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno_2 (INTERNAL_SYSCALL_ERRNO (result, err), tofd, bufto,
fromfd, buffrom);
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
INTERNAL_SYSCALL_DECL (err);
res = INTERNAL_SYSCALL (mkdir, err, 2, file, mode);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (res, err), fd, buf);
res = -1;
/* While unlink can return either EPERM or EACCES, mq_unlink should
return just EACCES. */
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (ret, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (ret, err)))
{
ret = INTERNAL_SYSCALL_ERRNO (ret, err);
if (ret == EPERM)
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
res = INTERNAL_SYSCALL (open, err, 3, file, oflag, mode);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (res, err), fd, buf);
res = -1;
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (__have_fallocate >= 0, 1))
+ if (__glibc_likely (__have_fallocate >= 0))
# endif
{
INTERNAL_SYSCALL_DECL (err);
return 0;
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS, 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS))
__have_fallocate = -1;
else
# endif
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (__have_fallocate >= 0, 1))
+ if (__glibc_likely (__have_fallocate >= 0))
# endif
{
INTERNAL_SYSCALL_DECL (err);
return 0;
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS, 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS))
__have_fallocate = -1;
else
# endif
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
else
result = INTERNAL_SYSCALL (chown, err, 3, file, owner, group);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
return -1;
{
int fd = __open ("/proc/cpuinfo", O_RDONLY);
- if (__builtin_expect (fd != -1, 1))
+ if (__glibc_likely (fd != -1))
{
/* The timebase will be in the 1st 1024 bytes for systems with up
to 8 processors. If the first read returns less then 1024
{
char *mhz = memmem (buf, n, "timebase", 7);
- if (__builtin_expect (mhz != NULL, 1))
+ if (__glibc_likely (mhz != NULL))
{
char *endp = buf + n;
if (fd != AT_FDCWD && path[0] != '/')
{
size_t pathlen = strlen (path);
- if (__builtin_expect (pathlen == 0, 0))
+ if (__glibc_unlikely (pathlen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (readlink, err, 3, path, buf, len);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, pathbuf);
result = -1;
recvmmsg (int fd, struct mmsghdr *vmessages, unsigned int vlen, int flags,
const struct timespec *tmo)
{
- if (__builtin_expect (have_recvmmsg >= 0, 1))
+ if (__glibc_likely (have_recvmmsg >= 0))
{
int ret = __internal_recvmmsg (fd, vmessages, vlen, flags, tmo);
/* The kernel returns -EINVAL for unknown socket operations.
if (oldfd != AT_FDCWD && old[0] != '/')
{
size_t filelen = strlen (old);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
if (newfd != AT_FDCWD && new[0] != '/')
{
size_t filelen = strlen (new);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (rename, err, 2, old, new);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno_2 (INTERNAL_SYSCALL_ERRNO (result, err), newfd, bufnew,
oldfd, bufold);
#define INLINE_SYSCALL(name, nr, args...) \
({ \
unsigned int _ret = INTERNAL_SYSCALL (name, , nr, args); \
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_ret, ), 0)) \
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (_ret, ))) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (_ret, )); \
_ret = 0xffffffff; \
#define INLINE_SYSCALL(name, nr, args...) \
({ \
long _ret = INTERNAL_SYSCALL (name, , nr, args); \
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_ret, ), 0)) \
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (_ret, ))) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (_ret, )); \
_ret = -1; \
int
__sched_setaffinity_new (pid_t pid, size_t cpusetsize, const cpu_set_t *cpuset)
{
- if (__builtin_expect (__kernel_cpumask_size == 0, 0))
+ if (__glibc_unlikely (__kernel_cpumask_size == 0))
{
INTERNAL_SYSCALL_DECL (err);
int res;
int
__sendmmsg (int fd, struct mmsghdr *vmessages, unsigned int vlen, int flags)
{
- if (__builtin_expect (have_sendmmsg >= 0, 1))
+ if (__glibc_likely (have_sendmmsg >= 0))
{
int ret = __internal_sendmmsg (fd, vmessages, vlen, flags);
/* The kernel returns -EINVAL for unknown socket operations.
/* OK, do it the hard way. Look through the /proc/mounts file and if
this does not exist through /etc/fstab to find the mount point. */
fp = __setmntent ("/proc/mounts", "r");
- if (__builtin_expect (fp == NULL, 0))
+ if (__glibc_unlikely (fp == NULL))
{
fp = __setmntent (_PATH_MNTTAB, "r");
- if (__builtin_expect (fp == NULL, 0))
+ if (__glibc_unlikely (fp == NULL))
/* There is nothing we can do. Blind guesses are not helpful. */
return;
}
}
#endif
}
- else if (__builtin_expect (errno == EISDIR, 0))
+ else if (__glibc_unlikely (errno == EISDIR))
/* It might be better to fold this error with EINVAL since
directory names are just another example for unsuitable shared
object names and the standard does not mention EISDIR. */
unsigned int result;
/* This is not necessary but some buggy programs depend on this. */
- if (__builtin_expect (seconds == 0, 0))
+ if (__glibc_unlikely (seconds == 0))
{
#ifdef CANCELLATION_P
CANCELLATION_P (THREAD_SELF);
if (tofd != AT_FDCWD && to[0] != '/')
{
size_t tolen = strlen (to);
- if (__builtin_expect (tolen == 0, 0))
+ if (__glibc_unlikely (tolen == 0))
{
__set_errno (ENOENT);
return -1;
result = INTERNAL_SYSCALL (symlink, err, 2, from, to);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), tofd, buf);
result = -1;
retval = INLINE_SYSCALL (ioctl, 3, fd, TCGETS, &k_termios);
- if (__builtin_expect (retval == 0, 1))
+ if (__glibc_likely (retval == 0))
{
termios_p->c_iflag = k_termios.c_iflag;
termios_p->c_oflag = k_termios.c_oflag;
/* isatty check, tcgetattr is used because it sets the correct
errno (EBADF resp. ENOTTY) on error. */
- if (__builtin_expect (__tcgetattr (fd, &term) < 0, 0))
+ if (__glibc_unlikely (__tcgetattr (fd, &term) < 0))
return NULL;
if (__fxstat64 (_STAT_VER, fd, &st) < 0)
}
ssize_t len = __readlink (procname, ttyname_buf, buflen);
- if (__builtin_expect (len != -1, 1))
+ if (__glibc_likely (len != -1))
{
if ((size_t) len >= buflen)
return NULL;
/* isatty check, tcgetattr is used because it sets the correct
errno (EBADF resp. ENOTTY) on error. */
struct termios term;
- if (__builtin_expect (__tcgetattr (fd, &term) < 0, 0))
+ if (__glibc_unlikely (__tcgetattr (fd, &term) < 0))
return errno;
if (__fxstat64 (_STAT_VER, fd, &st) < 0)
*_fitoa_word (fd, __stpcpy (procname, "/proc/self/fd/"), 10, 0) = '\0';
ssize_t ret = __readlink (procname, buf, buflen - 1);
- if (__builtin_expect (ret == -1 && errno == ENAMETOOLONG, 0))
+ if (__glibc_unlikely (ret == -1 && errno == ENAMETOOLONG))
{
__set_errno (ERANGE);
return ERANGE;
}
- if (__builtin_expect (ret != -1, 1))
+ if (__glibc_likely (ret != -1))
{
#define UNREACHABLE_LEN strlen ("(unreachable)")
if (ret > UNREACHABLE_LEN
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
else
result = INTERNAL_SYSCALL (unlink, err, 1, file);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
result = -1;
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
else
res = INTERNAL_SYSCALL (stat, err, 2, file, st);
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
{
__atfct_seterrno (INTERNAL_SYSCALL_ERRNO (res, err), fd, buf);
res = -1;
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (__have_fallocate >= 0, 1))
+ if (__glibc_likely (__have_fallocate >= 0))
# endif
{
INTERNAL_SYSCALL_DECL (err);
return 0;
# ifndef __ASSUME_FALLOCATE
- if (__builtin_expect (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS, 0))
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS))
__have_fallocate = -1;
else
# endif
# define INLINE_SYSCALL(name, nr, args...) \
({ \
unsigned long int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (resultvar, ), 0)) \
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
resultvar = (unsigned long int) -1; \
# define INLINE_SYSCALL_TYPES(name, nr, args...) \
({ \
unsigned long int resultvar = INTERNAL_SYSCALL_TYPES (name, , nr, args); \
- if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (resultvar, ), 0)) \
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
resultvar = (unsigned long int) -1; \
if (fd != AT_FDCWD && file[0] != '/')
{
size_t filelen = strlen (file);
- if (__builtin_expect (filelen == 0, 0))
+ if (__glibc_unlikely (filelen == 0))
{
__set_errno (ENOENT);
return -1;
ElfW(Addr) *const reloc_addr = (void *) reloc->r_offset;
const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);
- if (__builtin_expect (r_type == R_X86_64_IRELATIVE, 1))
+ if (__glibc_likely (r_type == R_X86_64_IRELATIVE))
{
ElfW(Addr) value = elf_ifunc_invoke(reloc->r_addend);
*reloc_addr = value;
to intercept the calls to collect information. In this case we
don't store the address in the GOT so that all future calls also
end in this function. */
- if (__builtin_expect (profile, 0))
+ if (__glibc_unlikely (profile))
{
*(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile;
const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);
# if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
- if (__builtin_expect (r_type == R_X86_64_RELATIVE, 0))
+ if (__glibc_unlikely (r_type == R_X86_64_RELATIVE))
{
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
/* This is defined in rtld.c, but nowhere in the static libc.a;
# if !defined RTLD_BOOTSTRAP
/* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
relocation updates the whole 64-bit entry. */
- if (__builtin_expect (r_type == R_X86_64_RELATIVE64, 0))
+ if (__glibc_unlikely (r_type == R_X86_64_RELATIVE64))
*(Elf64_Addr *) reloc_addr = (Elf64_Addr) map->l_addr + reloc->r_addend;
else
# endif
- if (__builtin_expect (r_type == R_X86_64_NONE, 0))
+ if (__glibc_unlikely (r_type == R_X86_64_NONE))
return;
else
{
*(unsigned int *) reloc_addr = value;
const char *fmt;
- if (__builtin_expect (value > UINT_MAX, 0))
+ if (__glibc_unlikely (value > UINT_MAX))
{
const char *strtab;
case R_X86_64_PC32:
value += reloc->r_addend - (ElfW(Addr)) reloc_addr;
*(unsigned int *) reloc_addr = value;
- if (__builtin_expect (value != (int) value, 0))
+ if (__glibc_unlikely (value != (int) value))
{
fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_PC32 relocation\n";
#if !defined RTLD_BOOTSTRAP
/* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
relocation updates the whole 64-bit entry. */
- if (__builtin_expect (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE64, 0))
+ if (__glibc_unlikely (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE64))
*(Elf64_Addr *) reloc_addr = (Elf64_Addr) l_addr + reloc->r_addend;
else
#endif
const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);
/* Check for unexpected PLT reloc type. */
- if (__builtin_expect (r_type == R_X86_64_JUMP_SLOT, 1))
+ if (__glibc_likely (r_type == R_X86_64_JUMP_SLOT))
{
if (__builtin_expect (map->l_mach.plt, 0) == 0)
*reloc_addr += l_addr;
map->l_mach.plt
+ (((ElfW(Addr)) reloc_addr) - map->l_mach.gotplt) * 2;
}
- else if (__builtin_expect (r_type == R_X86_64_TLSDESC, 1))
+ else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
{
struct tlsdesc volatile * __attribute__((__unused__)) td =
(struct tlsdesc volatile *)reloc_addr;
td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
+ map->l_addr);
}
- else if (__builtin_expect (r_type == R_X86_64_IRELATIVE, 0))
+ else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
{
ElfW(Addr) value = map->l_addr + reloc->r_addend;
- if (__builtin_expect (!skip_ifunc, 1))
+ if (__glibc_likely (!skip_ifunc))
value = ((ElfW(Addr) (*) (void)) value) ();
*reloc_addr = value;
}
this would mean the output needs more space. This would not be a
problem if the 'asctime_r' interface would be defined sanely and
a buffer size would be passed. */
- if (__builtin_expect (tp->tm_year > INT_MAX - 1900, 0))
+ if (__glibc_unlikely (tp->tm_year > INT_MAX - 1900))
{
eoverflow:
__set_errno (EOVERFLOW);
> (SIZE_MAX - total_size) / sizeof (struct ttinfo), 0))
goto lose;
total_size += num_types * sizeof (struct ttinfo);
- if (__builtin_expect (chars > SIZE_MAX - total_size, 0))
+ if (__glibc_unlikely (chars > SIZE_MAX - total_size))
goto lose;
total_size += chars;
if (__builtin_expect (__alignof__ (struct leap) - 1
|| tzspec_len < num_leaps * 12, 0))
goto lose;
tzspec_len -= num_leaps * 12;
- if (__builtin_expect (tzspec_len < num_isstd, 0))
+ if (__glibc_unlikely (tzspec_len < num_isstd))
goto lose;
tzspec_len -= num_isstd;
- if (__builtin_expect (tzspec_len == 0 || tzspec_len - 1 < num_isgmt, 0))
+ if (__glibc_unlikely (tzspec_len == 0 || tzspec_len - 1 < num_isgmt))
goto lose;
tzspec_len -= num_isgmt + 1;
- if (__builtin_expect (SIZE_MAX - total_size < tzspec_len, 0))
+ if (__glibc_unlikely (SIZE_MAX - total_size < tzspec_len))
goto lose;
}
- if (__builtin_expect (SIZE_MAX - total_size - tzspec_len < extra, 0))
+ if (__glibc_unlikely (SIZE_MAX - total_size - tzspec_len < extra))
goto lose;
/* Allocate enough memory including the extra block requested by the
/* Check for bogus indices in the data file, so we can hereafter
safely use type_idxs[T] as indices into `types' and never crash. */
for (i = 0; i < num_transitions; ++i)
- if (__builtin_expect (type_idxs[i] >= num_types, 0))
+ if (__glibc_unlikely (type_idxs[i] >= num_types))
goto lose;
if ((BYTE_ORDER != BIG_ENDIAN && (sizeof (time_t) == 4 || trans_width == 4))
0))
goto lose;
c = getc_unlocked (f);
- if (__builtin_expect ((unsigned int) c > 1u, 0))
+ if (__glibc_unlikely ((unsigned int) c > 1u))
goto lose;
types[i].isdst = c;
c = getc_unlocked (f);
- if (__builtin_expect ((size_t) c > chars, 0))
+ if (__glibc_unlikely ((size_t) c > chars))
/* Bogus index in data file. */
goto lose;
types[i].idx = c;
types[i].offset = (long int) decode (x);
}
- if (__builtin_expect (fread_unlocked (zone_names, 1, chars, f) != chars, 0))
+ if (__glibc_unlikely (fread_unlocked (zone_names, 1, chars, f) != chars))
goto lose;
for (i = 0; i < num_leaps; ++i)
else
leaps[i].transition = (time_t) decode64 (x);
- if (__builtin_expect (fread_unlocked (x, 1, 4, f) != 4, 0))
+ if (__glibc_unlikely (fread_unlocked (x, 1, 4, f) != 4))
goto lose;
leaps[i].change = (long int) decode (x);
}
for (i = 0; i < num_isstd; ++i)
{
int c = getc_unlocked (f);
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
goto lose;
types[i].isstd = c != 0;
}
for (i = 0; i < num_isgmt; ++i)
{
int c = getc_unlocked (f);
- if (__builtin_expect (c == EOF, 0))
+ if (__glibc_unlikely (c == EOF))
goto lose;
types[i].isgmt = c != 0;
}
__tzname[0] = NULL;
__tzname[1] = NULL;
- if (__builtin_expect (num_transitions == 0 || timer < transitions[0], 0))
+ if (__glibc_unlikely (num_transitions == 0 || timer < transitions[0]))
{
/* TIMER is before any transition (or there are no transitions).
Choose the first non-DST type
++j;
}
}
- else if (__builtin_expect (timer >= transitions[num_transitions - 1], 0))
+ else if (__glibc_unlikely (timer >= transitions[num_transitions - 1]))
{
- if (__builtin_expect (tzspec == NULL, 0))
+ if (__glibc_unlikely (tzspec == NULL))
{
use_last:
i = num_transitions;
/* Convert to broken down structure. If this fails do not
use the string. */
- if (__builtin_expect (! __offtime (&timer, 0, tp), 0))
+ if (__glibc_unlikely (! __offtime (&timer, 0, tp)))
goto use_last;
/* Use the rules from the TZ string to compute the change. */
/* If tzspec comes from posixrules loaded by __tzfile_default,
override the STD and DST zone names with the ones user
requested in TZ envvar. */
- if (__builtin_expect (zone_names == (char *) &leaps[num_leaps], 0))
+ if (__glibc_unlikely (zone_names == (char *) &leaps[num_leaps]))
{
assert (num_types == 2);
__tzname[0] = __tzstring (zone_names);
++j;
}
- if (__builtin_expect (__tzname[0] == NULL, 0))
+ if (__glibc_unlikely (__tzname[0] == NULL))
__tzname[0] = __tzname[1];
i = type_idxs[i - 1];
{
/* Check for the quoted version. */
char *wp = tzbuf;
- if (__builtin_expect (*tz++ != '<', 0))
+ if (__glibc_unlikely (*tz++ != '<'))
goto out;
while (isalnum (*tz) || *tz == '+' || *tz == '-')
*wp++ = *tz++;
- if (__builtin_expect (*tz++ != '>' || wp - tzbuf < 3, 0))
+ if (__glibc_unlikely (*tz++ != '>' || wp - tzbuf < 3))
goto out;
*wp = '\0';
}
- else if (__builtin_expect (consumed < 3, 0))
+ else if (__glibc_unlikely (consumed < 3))
goto out;
else
tz += consumed;
/* Check for the quoted version. */
char *wp = tzbuf;
const char *rp = tz;
- if (__builtin_expect (*rp++ != '<', 0))
+ if (__glibc_unlikely (*rp++ != '<'))
/* Punt on name, set up the offsets. */
goto done_names;
while (isalnum (*rp) || *rp == '+' || *rp == '-')
*wp++ = *rp++;
- if (__builtin_expect (*rp++ != '>' || wp - tzbuf < 3, 0))
+ if (__glibc_unlikely (*rp++ != '>' || wp - tzbuf < 3))
/* Punt on name, set up the offsets. */
goto done_names;
*wp = '\0';
tz = rp;
}
- else if (__builtin_expect (consumed < 3, 0))
+ else if (__glibc_unlikely (consumed < 3))
/* Punt on name, set up the offsets. */
goto done_names;
else
/* Do a normal conversion. */
inbuf = (const unsigned char *) s;
endbuf = inbuf + n;
- if (__builtin_expect (endbuf < inbuf, 0))
+ if (__glibc_unlikely (endbuf < inbuf))
{
endbuf = (const unsigned char *) ~(uintptr_t) 0;
if (endbuf == inbuf)
/* Do a normal conversion. */
inbuf = (const unsigned char *) s;
endbuf = inbuf + n;
- if (__builtin_expect (endbuf < inbuf, 0))
+ if (__glibc_unlikely (endbuf < inbuf))
{
endbuf = (const unsigned char *) ~(uintptr_t) 0;
if (endbuf == inbuf)
/* We should repeat the test since while we waited some other thread
might have run this function. */
- if (__builtin_expect (new_category->private.ctype == NULL, 1))
+ if (__glibc_likely (new_category->private.ctype == NULL))
{
/* We must find the real functions. */
const char *charset_name;
static inline const struct gconv_fcts *
get_gconv_fcts (struct __locale_data *data)
{
- if (__builtin_expect (data->private.ctype == NULL, 0))
+ if (__glibc_unlikely (data->private.ctype == NULL))
{
- if (__builtin_expect (data == &_nl_C_LC_CTYPE, 0))
+ if (__glibc_unlikely (data == &_nl_C_LC_CTYPE))
return &__wcsmbs_gconv_fcts_c;
__wcsmbs_load_conv (data);
}