This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.



Re: [PATCH] Fix i386 memmove issue [BZ #22644]


2018-02-19 11:13 GMT+01:00 Andreas Schwab <schwab@suse.de>:
> On Feb 19 2018, Andrew Senkevich <andrew.n.senkevich@gmail.com> wrote:
>
>> diff --git a/string/test-memmove.c b/string/test-memmove.c
>> index edc7a4c..8dc152b
>> --- a/string/test-memmove.c
>> +++ b/string/test-memmove.c
>> @@ -245,6 +245,49 @@ do_random_tests (void)
>>      }
>>  }
>>
>> +#if __SIZEOF_POINTER__ == 4
>> +static void
>> +do_test2 (void)
>> +{
>> +  uint32_t i;
>> +  uint32_t num = 0x20000000;
>> +  uint32_t * large_buf = mmap (0, sizeof(uint32_t) * num, PROT_READ | PROT_WRITE,
>> +        MAP_PRIVATE | MAP_ANON, -1, 0);
>> +  if (large_buf == MAP_FAILED)
>> +    error (EXIT_FAILURE, errno, "large mmap failed");
>> +
>> +  if (!((uint32_t)(large_buf) < (0x80000000 - 128) && (0x80000000 + 128) < (uint32_t)(&large_buf[num])))
>> +    {
>> +      error (0, 0,"allocated large memory doesn't cross 0x80000000 boundary");
>> +      ret = 1;
>> +      return;
>
> Please properly fold long lines, and remove the redundant parens.  Also,
> there is no guarantee that the address range is unallocated.
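
For context on that last point: without MAP_FIXED the first argument to
mmap is only a hint, so a large anonymous mapping is not guaranteed to
straddle 0x80000000, and forcing it there could clobber an existing
mapping.  A minimal sketch (illustration only, not part of either
version of the patch) of how a hint centered on the boundary could make
the crossing more likely while still checking the result:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Illustration only: ask for a mapping centered on the 2 GiB boundary.
   Without MAP_FIXED the hint is advisory, so the result still has to
   be checked and the test skipped if the kernel placed the mapping
   elsewhere.  */
static void *
map_across_2gb (size_t len)
{
  void *hint = (void *) (uintptr_t) (0x80000000u - len / 2);
  void *p = mmap (hint, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANON, -1, 0);
  if (p == MAP_FAILED)
    return NULL;
  /* Same crossing check as in the updated test below, done in 64 bits
     to avoid wrapping when len is large.  */
  if ((uintptr_t) p >= 0x80000000u - 128
      || (uint64_t) (uintptr_t) p + len < 0x80000000u + 128)
    {
      /* Caller should treat this as unsupported rather than a failure.  */
      munmap (p, len);
      return NULL;
    }
  return p;
}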

Thanks, updated patch below.  Any comments, or is it OK for trunk?

diff --git a/string/test-memmove.c b/string/test-memmove.c
index edc7a4c..a8c4af4 100644
--- a/string/test-memmove.c
+++ b/string/test-memmove.c
@@ -245,6 +245,57 @@ do_random_tests (void)
     }
 }

+#if __SIZEOF_POINTER__ == 4
+static void
+do_test2 (void)
+{
+  uint32_t i;
+  uint32_t num = 0x20000000;
+  uint32_t * large_buf = mmap (0, sizeof(uint32_t) * num,
+        PROT_READ | PROT_WRITE,
+        MAP_PRIVATE | MAP_ANON, -1, 0);
+  if (large_buf == MAP_FAILED)
+    error (EXIT_FAILURE, errno, "Large mmap failed");
+
+  if ((uint32_t)large_buf > 0x80000000 - 128
+      || (uint32_t)&large_buf[num] < 0x80000000 + 128)
+    {
+      error (0, 0,"Large mmap doesn't cross 0x80000000 boundary");
+      ret = EXIT_UNSUPPORTED;
+      munmap((void *)large_buf, sizeof(uint32_t) * num);
+      return;
+    }
+
+  int bytes_move = 0x80000000 - (uint32_t)large_buf;
+  int arr_size = bytes_move / sizeof(uint32_t);
+
+  FOR_EACH_IMPL (impl, 0)
+    {
+      for (i = 0; i < arr_size; i++)
+        large_buf[i] = i;
+
+      uint32_t * dst = &large_buf[33];
+
+      CALL (impl, (char *)dst, (char *)large_buf, bytes_move);
+
+      for (i = 0; i < arr_size; i++)
+        {
+          if (dst[i] != i)
+            {
+              error (0, 0,
+                     "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%d\"",
+                     impl->name, dst, large_buf, i);
+              ret = 1;
+              munmap((void *)large_buf, sizeof(uint32_t) * num);
+              return;
+            }
+        }
+    }
+
+  munmap((void *)large_buf, sizeof(uint32_t) * num);
+}
+#endif
+
 int
 test_main (void)
 {
@@ -284,6 +335,11 @@ test_main (void)
     }

   do_random_tests ();
+
+#if __SIZEOF_POINTER__ == 4
+  do_test2 ();
+#endif
+
   return ret;
 }

diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
index 9c3bbe7..9aa17de 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
  cmp %edx, %eax

 # ifdef USE_AS_MEMMOVE
- jg L(check_forward)
+ ja L(check_forward)

 L(mm_len_0_or_more_backward):
 /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
  jbe L(mm_len_0_16_bytes_backward)

  cmpl $32, %ecx
- jg L(mm_len_32_or_more_backward)
+ ja L(mm_len_32_or_more_backward)

 /* Copy [0..32] and return.  */
  movdqu (%eax), %xmm0
@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):

 L(mm_len_32_or_more_backward):
  cmpl $64, %ecx
- jg L(mm_len_64_or_more_backward)
+ ja L(mm_len_64_or_more_backward)

 /* Copy [0..64] and return.  */
  movdqu (%eax), %xmm0
@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):

 L(mm_len_64_or_more_backward):
  cmpl $128, %ecx
- jg L(mm_len_128_or_more_backward)
+ ja L(mm_len_128_or_more_backward)

 /* Copy [0..128] and return.  */
  movdqu (%eax), %xmm0
@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
  add %ecx, %eax
  cmp %edx, %eax
  movl SRC(%esp), %eax
- jle L(forward)
+ jbe L(forward)
  PUSH (%esi)
  PUSH (%edi)
  PUSH (%ebx)
@@ -269,7 +269,7 @@ L(check_forward):
  add %edx, %ecx
  cmp %eax, %ecx
  movl LEN(%esp), %ecx
- jle L(forward)
+ jbe L(forward)

 /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
  separately.  */
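
The jg/jle -> ja/jbe changes replace signed branches with unsigned
ones.  The values being compared are unsigned 32-bit quantities (the
addresses in the dispatch branches, the length in %ecx in the others),
so anything at or above 0x80000000 looks negative to jg/jle and the
wrong path is taken; that is exactly the situation the new test
provokes by moving data across the 2 GiB boundary.  A minimal
standalone C illustration (not part of the patch) of the difference:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Two hypothetical addresses on opposite sides of the 2 GiB boundary,
     as the new test arranges: src just below it, dst just above it.  */
  uint32_t src = 0x7fffff00u;
  uint32_t dst = 0x80000040u;

  /* Unsigned compare, what ja/jbe test: dst really is above src.  */
  printf ("unsigned: dst > src -> %d\n", dst > src);

  /* Signed compare, what jg/jle tested: 0x80000040 is negative as a
     32-bit signed value, so dst appears to be below src and the copy
     direction for the overlapping case is chosen wrongly.  */
  printf ("signed:   (int32_t) dst > (int32_t) src -> %d\n",
          (int32_t) dst > (int32_t) src);
  return 0;
}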


--
WBR,
Andrew

