[PATCH] memcpy: use unsigned comparison branches instead of bge/blt (CVE-2020-6096) [BZ #25620]

zhuyan (M) zhuyan34@huawei.com
Thu Apr 9 14:05:10 GMT 2020


An exploitable signed-comparison vulnerability exists in the ARMv7
memcpy() implementation of GNU glibc. Calling memcpy() (on ARMv7
targets that use the GNU glibc implementation) with a 'num' parameter
large enough to be negative when interpreted as a signed integer
triggers this signed-comparison vulnerability.

If an attacker underflows the 'num' parameter to memcpy(), this
vulnerability could lead to undefined behavior such as writing to
out-of-bounds memory and potentially remote code execution.
Furthermore, this memcpy() implementation allows program execution
to continue in scenarios where a segmentation fault or crash should
have occurred. The danger is that subsequent execution and
iterations of this code will operate on this corrupted data.

Reference URL: https://sourceware.org/bugzilla/attachment.cgi?id=12334&action=edit

Signed-off-by: Yan Zhu <zhuyan34@huawei.com>
---
sysdeps/arm/armv7/multiarch/memcpy_impl.S | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
index bf4ac7077f..7455bdc6c7 100644
--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
@@ -268,7 +268,7 @@ ENTRY(memcpy)
        mov dst, dstin /* Preserve dstin, we need to return it.  */
       cmp count, #64
-        bge  .Lcpy_not_short
+       bhs   .Lcpy_not_short
       /* Deal with small copies quickly by dropping straight into the
          exit block.  */
@@ -351,10 +351,10 @@ ENTRY(memcpy)
 1:
       subs tmp2, count, #64     /* Use tmp2 for count.  */
-        blt    .Ltail63aligned
+       blo    .Ltail63aligned
        cmp tmp2, #512
-        bge  .Lcpy_body_long
+       bhs   .Lcpy_body_long
 .Lcpy_body_medium:                       /* Count in tmp2.  */
#ifdef USE_VFP
@@ -378,7 +378,7 @@ ENTRY(memcpy)
       add  src, src, #64
       vstr  d1, [dst, #56]
       add  dst, dst, #64
-        bge  1b
+       bhs   1b
       tst    tmp2, #0x3f
       beq  .Ldone
@@ -412,7 +412,7 @@ ENTRY(memcpy)
       ldrd  A_l, A_h, [src, #64]!
       strd  A_l, A_h, [dst, #64]!
       subs tmp2, tmp2, #64
-        bge  1b
+       bhs   1b
       tst    tmp2, #0x3f
       bne  1f
       ldr    tmp2,[sp], #FRAME_SIZE
@@ -482,7 +482,7 @@ ENTRY(memcpy)
       add  src, src, #32
        subs tmp2, tmp2, #prefetch_lines * 64 * 2
-        blt    2f
+       blo    2f
1:
       cpy_line_vfp     d3, 0
       cpy_line_vfp     d4, 64
@@ -494,7 +494,7 @@ ENTRY(memcpy)
       add  dst, dst, #2 * 64
       add  src, src, #2 * 64
       subs tmp2, tmp2, #prefetch_lines * 64
-        bge  1b
+       bhs   1b
 2:
       cpy_tail_vfp     d3, 0
--
2.12.3


More information about the Libc-alpha mailing list