[PATCH 7/7] Add _Complex type support to C parser

Tom Tromey <tom@tromey.com>
Sat Mar 7 15:22:46 GMT 2020


This changes the C parser to recognize the _Complex and __complex__
keywords, adding support for complex types in casts and other type
expressions.
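
For example, after this change expressions like the following are
accepted (an illustrative session; the output shown mirrors the
expectations in the new complex-parts.exp tests below):

    (gdb) print (_Complex int) 4
    $1 = 4 + 0i
    (gdb) print (_Complex float) 4.5
    $2 = 4.5 + 0i
    (gdb) ptype __complex__ short
    type = _Complex short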

gdb/ChangeLog
2020-03-07  Tom Tromey  <tom@tromey.com>

	* c-exp.y (FLOAT_KEYWORD, COMPLEX): New tokens.
	(scalar_type): New rule, from typebase.
	(typebase): Use scalar_type.  Recognize complex types.
	(field_name): Handle FLOAT_KEYWORD.
	(ident_tokens): Add _Complex and __complex__.

gdb/testsuite/ChangeLog
2020-03-07  Tom Tromey  <tom@tromey.com>

	* gdb.base/complex-parts.exp: Add type tests.
---
 gdb/ChangeLog                            |  8 +++
 gdb/c-exp.y                              | 81 +++++++++++++++---------
 gdb/testsuite/ChangeLog                  |  4 ++
 gdb/testsuite/gdb.base/complex-parts.exp |  5 ++
 4 files changed, 68 insertions(+), 30 deletions(-)

diff --git a/gdb/c-exp.y b/gdb/c-exp.y
index e0051ba4e00..a3f0fb07b39 100644
--- a/gdb/c-exp.y
+++ b/gdb/c-exp.y
@@ -175,7 +175,7 @@ static void c_print_token (FILE *file, int type, YYSTYPE value);
 
 %type <voidval> exp exp1 type_exp start variable qualified_name lcurly function_method
 %type <lval> rcurly
-%type <tval> type typebase
+%type <tval> type typebase scalar_type
 %type <tvec> nonempty_typelist func_mod parameter_typelist
 /* %type <bval> block */
 
@@ -238,6 +238,7 @@ static void c_print_token (FILE *file, int type, YYSTYPE value);
 /* Special type cases, put in to allow the parser to distinguish different
    legal basetypes.  */
 %token SIGNED_KEYWORD LONG SHORT INT_KEYWORD CONST_KEYWORD VOLATILE_KEYWORD DOUBLE_KEYWORD
+%token FLOAT_KEYWORD COMPLEX
 
 %token <sval> DOLLAR_VARIABLE
 
@@ -1323,20 +1324,11 @@ func_mod:	'(' ')'
 type	:	ptype
 	;
 
-/* Implements (approximately): (type-qualifier)* type-specifier.
+/* A helper production that recognizes scalar types that can validly
+   be used with _Complex.  */
 
-   When type-specifier is only ever a single word, like 'float' then these
-   arrive as pre-built TYPENAME tokens thanks to the classify_name
-   function.  However, when a type-specifier can contain multiple words,
-   for example 'double' can appear as just 'double' or 'long double', and
-   similarly 'long' can appear as just 'long' or in 'long double', then
-   these type-specifiers are parsed into their own tokens in the function
-   lex_one_token and the ident_tokens array.  These separate tokens are all
-   recognised here.  */
-typebase
-	:	TYPENAME
-			{ $$ = $1.type; }
-	|	INT_KEYWORD
+scalar_type:
+		INT_KEYWORD
 			{ $$ = lookup_signed_typename (pstate->language (),
 						       "int"); }
 	|	LONG
@@ -1419,11 +1411,49 @@ typebase
 						"double",
 						NULL,
 						0); }
+	|	FLOAT_KEYWORD
+			{ $$ = lookup_typename (pstate->language (),
+						"float",
+						NULL,
+						0); }
 	|	LONG DOUBLE_KEYWORD
 			{ $$ = lookup_typename (pstate->language (),
 						"long double",
 						NULL,
 						0); }
+	|	UNSIGNED type_name
+			{ $$ = lookup_unsigned_typename (pstate->language (),
+							 TYPE_NAME($2.type)); }
+	|	UNSIGNED
+			{ $$ = lookup_unsigned_typename (pstate->language (),
+							 "int"); }
+	|	SIGNED_KEYWORD type_name
+			{ $$ = lookup_signed_typename (pstate->language (),
+						       TYPE_NAME($2.type)); }
+	|	SIGNED_KEYWORD
+			{ $$ = lookup_signed_typename (pstate->language (),
+						       "int"); }
+	;
+
+/* Implements (approximately): (type-qualifier)* type-specifier.
+
+   When type-specifier is only ever a single word, like 'char', then these
+   arrive as pre-built TYPENAME tokens thanks to the classify_name
+   function.  However, when a type-specifier can contain multiple words,
+   for example 'double' can appear as just 'double' or 'long double', and
+   similarly 'long' can appear as just 'long' or in 'long double', then
+   these type-specifiers are parsed into their own tokens in the function
+   lex_one_token and the ident_tokens array.  These separate tokens are all
+   recognised here.  */
+typebase
+	:	TYPENAME
+			{ $$ = $1.type; }
+	|	scalar_type
+			{ $$ = $1; }
+	|	COMPLEX scalar_type
+			{
+			  $$ = init_complex_type (nullptr, $2);
+			}
 	|	STRUCT name
 			{ $$
 			    = lookup_struct (copy_name ($2).c_str (),
@@ -1490,18 +1520,6 @@ typebase
 						       $2.length);
 			  $$ = NULL;
 			}
-	|	UNSIGNED type_name
-			{ $$ = lookup_unsigned_typename (pstate->language (),
-							 TYPE_NAME($2.type)); }
-	|	UNSIGNED
-			{ $$ = lookup_unsigned_typename (pstate->language (),
-							 "int"); }
-	|	SIGNED_KEYWORD type_name
-			{ $$ = lookup_signed_typename (pstate->language (),
-						       TYPE_NAME($2.type)); }
-	|	SIGNED_KEYWORD
-			{ $$ = lookup_signed_typename (pstate->language (),
-						       "int"); }
                 /* It appears that this rule for templates is never
                    reduced; template recognition happens by lookahead
                    in the token processing code in yylex. */
@@ -1727,12 +1745,11 @@ oper:	OPERATOR NEW
    match the 'name' rule to appear as fields within a struct.  The example
    that initially motivated this was the RISC-V target which models the
    floating point registers as a union with fields called 'float' and
-   'double'.  The 'float' string becomes a TYPENAME token and can appear
-   anywhere a 'name' can, however 'double' is its own token,
-   DOUBLE_KEYWORD, and doesn't match the 'name' rule.*/
+   'double'.  */
 field_name
 	:	name
 	|	DOUBLE_KEYWORD { $$ = typename_stoken ("double"); }
+	|	FLOAT_KEYWORD { $$ = typename_stoken ("float"); }
 	|	INT_KEYWORD { $$ = typename_stoken ("int"); }
 	|	LONG { $$ = typename_stoken ("long"); }
 	|	SHORT { $$ = typename_stoken ("short"); }
@@ -2460,7 +2477,7 @@ static const struct token tokentab2[] =
 /* Identifier-like tokens.  Only type-specifiers than can appear in
    multi-word type names (for example 'double' can appear in 'long
    double') need to be listed here.  type-specifiers that are only ever
-   single word (like 'float') are handled by the classify_name function.  */
+   single word (like 'char') are handled by the classify_name function.  */
 static const struct token ident_tokens[] =
   {
     {"unsigned", UNSIGNED, OP_NULL, 0},
@@ -2472,6 +2489,7 @@ static const struct token ident_tokens[] =
     {"_Alignof", ALIGNOF, OP_NULL, 0},
     {"alignof", ALIGNOF, OP_NULL, FLAG_CXX},
     {"double", DOUBLE_KEYWORD, OP_NULL, 0},
+    {"float", FLOAT_KEYWORD, OP_NULL, 0},
     {"false", FALSEKEYWORD, OP_NULL, FLAG_CXX},
     {"class", CLASS, OP_NULL, FLAG_CXX},
     {"union", UNION, OP_NULL, 0},
@@ -2479,6 +2497,9 @@ static const struct token ident_tokens[] =
     {"const", CONST_KEYWORD, OP_NULL, 0},
     {"enum", ENUM, OP_NULL, 0},
     {"long", LONG, OP_NULL, 0},
+    {"_Complex", COMPLEX, OP_NULL, 0},
+    {"__complex__", COMPLEX, OP_NULL, 0},
+
     {"true", TRUEKEYWORD, OP_NULL, FLAG_CXX},
     {"int", INT_KEYWORD, OP_NULL, 0},
     {"new", NEW, OP_NULL, FLAG_CXX},
diff --git a/gdb/testsuite/gdb.base/complex-parts.exp b/gdb/testsuite/gdb.base/complex-parts.exp
index 0cf4abf56ec..38aad395ad2 100644
--- a/gdb/testsuite/gdb.base/complex-parts.exp
+++ b/gdb/testsuite/gdb.base/complex-parts.exp
@@ -86,3 +86,8 @@ gdb_test "print (5 + 7i) != (8 + 7i)" " = 1"
 gdb_test "print (5 + 7i) != (5 + 92i)" " = 1"
 
 gdb_test "print (20 - 4i) / (3 + 2i)" " = 4 \\+ -4i"
+
+gdb_test "print (_Complex int) 4" " = 4 \\+ 0i"
+gdb_test "print (_Complex float) 4.5" " = 4.5 \\+ 0i"
+gdb_test "ptype __complex__ short" " = _Complex short"
+gdb_test "print (_Complex int) (23.75 + 8.88i)" " = 23 \\+ 8i"
-- 
2.17.2