author	Philip Herron <philip.herron@embecosm.com>	2022-10-21 14:27:56 +0200
committer	Arthur Cohen <arthur.cohen@embecosm.com>	2022-12-13 14:00:07 +0100
commit	019b2f15581948806ee14a6d05b09ec94f04c966 (patch)
tree	33a737ec45d313c8797fe297a72126a65b52e265
parent	15f04af347e3b65f436808077cbac4fa566019f9 (diff)
gccrs: Add HIR to GCC GENERIC lowering for all nodes
This patch implements the lowering mentioned in the previous patch for all HIR nodes.

gcc/rust/
	* backend/rust-compile-block.cc: New.
	* backend/rust-compile-block.h: New.
	* backend/rust-compile-expr.cc: New.
	* backend/rust-compile-expr.h: New.
	* backend/rust-compile-extern.h: New.
	* backend/rust-compile-fnparam.cc: New.
	* backend/rust-compile-fnparam.h: New.
	* backend/rust-compile-implitem.cc: New.
	* backend/rust-compile-implitem.h: New.
	* backend/rust-compile-intrinsic.cc: New.
	* backend/rust-compile-intrinsic.h: New.
	* backend/rust-compile-item.cc: New.
	* backend/rust-compile-item.h: New.
	* backend/rust-compile-pattern.cc: New.
	* backend/rust-compile-pattern.h: New.
	* backend/rust-compile-resolve-path.cc: New.
	* backend/rust-compile-resolve-path.h: New.
	* backend/rust-compile-stmt.cc: New.
	* backend/rust-compile-stmt.h: New.
	* backend/rust-compile-struct-field-expr.cc: New.
	* backend/rust-compile-struct-field-expr.h: New.
	* backend/rust-compile-type.cc: New.
	* backend/rust-compile-type.h: New.
	* backend/rust-compile-var-decl.h: New.

Co-authored-by: David Faust <david.faust@oracle.com>
-rw-r--r--	gcc/rust/backend/rust-compile-block.cc	158
-rw-r--r--	gcc/rust/backend/rust-compile-block.h	211
-rw-r--r--	gcc/rust/backend/rust-compile-expr.cc	2769
-rw-r--r--	gcc/rust/backend/rust-compile-expr.h	148
-rw-r--r--	gcc/rust/backend/rust-compile-extern.h	172
-rw-r--r--	gcc/rust/backend/rust-compile-fnparam.cc	121
-rw-r--r--	gcc/rust/backend/rust-compile-fnparam.h	70
-rw-r--r--	gcc/rust/backend/rust-compile-implitem.cc	101
-rw-r--r--	gcc/rust/backend/rust-compile-implitem.h	91
-rw-r--r--	gcc/rust/backend/rust-compile-intrinsic.cc	515
-rw-r--r--	gcc/rust/backend/rust-compile-intrinsic.h	40
-rw-r--r--	gcc/rust/backend/rust-compile-item.cc	206
-rw-r--r--	gcc/rust/backend/rust-compile-item.h	88
-rw-r--r--	gcc/rust/backend/rust-compile-pattern.cc	333
-rw-r--r--	gcc/rust/backend/rust-compile-pattern.h	95
-rw-r--r--	gcc/rust/backend/rust-compile-resolve-path.cc	301
-rw-r--r--	gcc/rust/backend/rust-compile-resolve-path.h	73
-rw-r--r--	gcc/rust/backend/rust-compile-stmt.cc	115
-rw-r--r--	gcc/rust/backend/rust-compile-stmt.h	69
-rw-r--r--	gcc/rust/backend/rust-compile-struct-field-expr.cc	81
-rw-r--r--	gcc/rust/backend/rust-compile-struct-field-expr.h	46
-rw-r--r--	gcc/rust/backend/rust-compile-type.cc	713
-rw-r--r--	gcc/rust/backend/rust-compile-type.h	79
-rw-r--r--	gcc/rust/backend/rust-compile-var-decl.h	95
24 files changed, 6690 insertions, 0 deletions
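
All of the new files follow the same compile-query shape: a visitor derived from HIRCompileBase exposes a static entry point that builds the visitor, dispatches the HIR node (through accept_vis, or a direct visit call for blocks), and returns whatever GENERIC tree the visit stored in its translated member. A minimal sketch of that shape, using the interfaces appearing throughout this patch; CompileFoo and HIR::Foo are hypothetical stand-ins, the concrete classes are CompileBlock, CompileExpr, CompileStmt and friends below:

    // Illustrative sketch only, not part of the patch.  CompileFoo / HIR::Foo
    // are placeholder names for the concrete visitor classes added here.
    class CompileFoo : private HIRCompileBase
    {
    public:
      static tree compile (HIR::Foo *node, Context *ctx)
      {
        CompileFoo compiler (ctx);
        node->accept_vis (compiler); // dispatch to the matching visit () overload
        return compiler.translated;  // GENERIC tree built during the visit
      }

    private:
      CompileFoo (Context *ctx)
        : HIRCompileBase (ctx), translated (error_mark_node)
      {}

      tree translated;
    };
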
diff --git a/gcc/rust/backend/rust-compile-block.cc b/gcc/rust/backend/rust-compile-block.cc
new file mode 100644
index 00000000000..99674e2d1e7
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-block.cc
@@ -0,0 +1,158 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-block.h"
+#include "rust-compile-stmt.h"
+#include "rust-compile-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileBlock::CompileBlock (Context *ctx, Bvariable *result)
+ : HIRCompileBase (ctx), translated (nullptr), result (result)
+{}
+
+tree
+CompileBlock::compile (HIR::BlockExpr *expr, Context *ctx, Bvariable *result)
+{
+ CompileBlock compiler (ctx, result);
+ compiler.visit (*expr);
+ return compiler.translated;
+}
+
+void
+CompileBlock::visit (HIR::BlockExpr &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ Location start_location = expr.get_locus ();
+ Location end_location = expr.get_end_locus ();
+ auto body_mappings = expr.get_mappings ();
+
+ Resolver::Rib *rib = nullptr;
+ if (!ctx->get_resolver ()->find_name_rib (body_mappings.get_nodeid (), &rib))
+ {
+ rust_fatal_error (expr.get_locus (), "failed to setup locals per block");
+ return;
+ }
+
+ std::vector<Bvariable *> locals
+ = compile_locals_for_block (ctx, *rib, fndecl);
+
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree new_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (new_block);
+
+ for (auto &s : expr.get_statements ())
+ {
+ auto compiled_expr = CompileStmt::Compile (s.get (), ctx);
+ if (compiled_expr != nullptr)
+ {
+ tree s = convert_to_void (compiled_expr, ICV_STATEMENT);
+ ctx->add_statement (s);
+ }
+ }
+
+ if (expr.has_expr ())
+ {
+ // the previous passes will ensure this is a valid return or
+ // a valid trailing expression
+ tree compiled_expr = CompileExpr::Compile (expr.expr.get (), ctx);
+ if (compiled_expr != nullptr)
+ {
+ if (result == nullptr)
+ {
+ ctx->add_statement (compiled_expr);
+ }
+ else
+ {
+ tree result_reference = ctx->get_backend ()->var_expression (
+ result, expr.get_final_expr ()->get_locus ());
+
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (result_reference,
+ compiled_expr,
+ expr.get_locus ());
+ ctx->add_statement (assignment);
+ }
+ }
+ }
+
+ ctx->pop_block ();
+ translated = new_block;
+}
+
+void
+CompileConditionalBlocks::visit (HIR::IfExpr &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx);
+ tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result);
+
+ translated
+ = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block,
+ NULL, expr.get_locus ());
+}
+
+void
+CompileConditionalBlocks::visit (HIR::IfExprConseqElse &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx);
+ tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result);
+ tree else_block = CompileBlock::compile (expr.get_else_block (), ctx, result);
+
+ translated
+ = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block,
+ else_block, expr.get_locus ());
+}
+
+void
+CompileConditionalBlocks::visit (HIR::IfExprConseqIf &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx);
+ tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result);
+
+ // else block
+ std::vector<Bvariable *> locals;
+ Location start_location = expr.get_conseq_if_expr ()->get_locus ();
+ Location end_location = expr.get_conseq_if_expr ()->get_locus (); // FIXME
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree else_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (else_block);
+
+ tree else_stmt_decl
+ = CompileConditionalBlocks::compile (expr.get_conseq_if_expr (), ctx,
+ result);
+ ctx->add_statement (else_stmt_decl);
+
+ ctx->pop_block ();
+
+ translated
+ = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block,
+ else_block, expr.get_locus ());
+}
+
+} // namespace Compile
+} // namespace Rust
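
The Bvariable result threaded through CompileBlock and CompileConditionalBlocks is how a value-producing block or if-expression hands its trailing value back to the caller: the caller allocates a temporary when the expression's type is not unit, passes it down, and reads it back afterwards. A condensed caller-side sketch, mirroring CompileExpr::visit (HIR::BlockExpr) later in this patch (block_tyty, fnctx, block_type, enclosing_scope and expr are assumed from that enclosing visitor):

    // Condensed sketch of the caller-side protocol; not part of the patch.
    Bvariable *tmp = nullptr;
    if (!block_tyty->is_unit ())
      {
        tree ret_var_stmt = nullptr;
        tmp = ctx->get_backend ()->temporary_variable (
          fnctx.fndecl, enclosing_scope, block_type, NULL,
          false /* is_address_taken */, expr.get_locus (), &ret_var_stmt);
        ctx->add_statement (ret_var_stmt);
      }

    tree block_stmt = CompileBlock::compile (&expr, ctx, tmp); // a BIND_EXPR
    ctx->add_statement (block_stmt);

    if (tmp != nullptr)
      translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
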
diff --git a/gcc/rust/backend/rust-compile-block.h b/gcc/rust/backend/rust-compile-block.h
new file mode 100644
index 00000000000..cdd17f19ca2
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-block.h
@@ -0,0 +1,211 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_BLOCK
+#define RUST_COMPILE_BLOCK
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileBlock : private HIRCompileBase
+{
+public:
+ static tree compile (HIR::BlockExpr *expr, Context *ctx, Bvariable *result);
+
+protected:
+ void visit (HIR::BlockExpr &expr);
+
+private:
+ CompileBlock (Context *ctx, Bvariable *result);
+
+ tree translated;
+ Bvariable *result;
+};
+
+class CompileConditionalBlocks : public HIRCompileBase,
+ public HIR::HIRExpressionVisitor
+{
+public:
+ static tree compile (HIR::IfExpr *expr, Context *ctx, Bvariable *result)
+ {
+ CompileConditionalBlocks resolver (ctx, result);
+ expr->accept_vis (resolver);
+ return resolver.translated;
+ }
+
+ void visit (HIR::IfExpr &expr) override;
+ void visit (HIR::IfExprConseqElse &expr) override;
+ void visit (HIR::IfExprConseqIf &expr) override;
+
+ // Empty visit for unused Expression HIR nodes.
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::ClosureExprInner &) override {}
+ void visit (HIR::ClosureExprInnerTyped &) override {}
+ void visit (HIR::StructExprFieldIdentifier &) override {}
+ void visit (HIR::StructExprFieldIdentifierValue &) override {}
+ void visit (HIR::StructExprFieldIndexValue &) override {}
+ void visit (HIR::StructExprStruct &) override {}
+ void visit (HIR::StructExprStructFields &) override {}
+ void visit (HIR::LiteralExpr &) override {}
+ void visit (HIR::BorrowExpr &) override {}
+ void visit (HIR::DereferenceExpr &) override {}
+ void visit (HIR::ErrorPropagationExpr &) override {}
+ void visit (HIR::NegationExpr &) override {}
+ void visit (HIR::ArithmeticOrLogicalExpr &) override {}
+ void visit (HIR::ComparisonExpr &) override {}
+ void visit (HIR::LazyBooleanExpr &) override {}
+ void visit (HIR::TypeCastExpr &) override {}
+ void visit (HIR::AssignmentExpr &) override {}
+ void visit (HIR::CompoundAssignmentExpr &) override {}
+ void visit (HIR::GroupedExpr &) override {}
+ void visit (HIR::ArrayExpr &) override {}
+ void visit (HIR::ArrayIndexExpr &) override {}
+ void visit (HIR::TupleExpr &) override {}
+ void visit (HIR::TupleIndexExpr &) override {}
+ void visit (HIR::CallExpr &) override {}
+ void visit (HIR::MethodCallExpr &) override {}
+ void visit (HIR::FieldAccessExpr &) override {}
+ void visit (HIR::BlockExpr &) override {}
+ void visit (HIR::ContinueExpr &) override {}
+ void visit (HIR::BreakExpr &) override {}
+ void visit (HIR::RangeFromToExpr &) override {}
+ void visit (HIR::RangeFromExpr &) override {}
+ void visit (HIR::RangeToExpr &) override {}
+ void visit (HIR::RangeFullExpr &) override {}
+ void visit (HIR::RangeFromToInclExpr &) override {}
+ void visit (HIR::RangeToInclExpr &) override {}
+ void visit (HIR::ReturnExpr &) override {}
+ void visit (HIR::UnsafeBlockExpr &) override {}
+ void visit (HIR::LoopExpr &) override {}
+ void visit (HIR::WhileLoopExpr &) override {}
+ void visit (HIR::WhileLetLoopExpr &) override {}
+ void visit (HIR::ForLoopExpr &) override {}
+ void visit (HIR::IfExprConseqIfLet &) override {}
+ void visit (HIR::IfLetExpr &) override {}
+ void visit (HIR::IfLetExprConseqElse &) override {}
+ void visit (HIR::IfLetExprConseqIf &) override {}
+ void visit (HIR::IfLetExprConseqIfLet &) override {}
+ void visit (HIR::MatchExpr &) override {}
+ void visit (HIR::AwaitExpr &) override {}
+ void visit (HIR::AsyncBlockExpr &) override {}
+
+private:
+ CompileConditionalBlocks (Context *ctx, Bvariable *result)
+ : HIRCompileBase (ctx), translated (nullptr), result (result)
+ {}
+
+ tree translated;
+ Bvariable *result;
+};
+
+class CompileExprWithBlock : public HIRCompileBase,
+ public HIR::HIRExpressionVisitor
+{
+public:
+ static tree compile (HIR::ExprWithBlock *expr, Context *ctx,
+ Bvariable *result)
+ {
+ CompileExprWithBlock resolver (ctx, result);
+ expr->accept_vis (resolver);
+ return resolver.translated;
+ }
+
+ void visit (HIR::IfExpr &expr) override
+ {
+ translated = CompileConditionalBlocks::compile (&expr, ctx, result);
+ }
+
+ void visit (HIR::IfExprConseqElse &expr) override
+ {
+ translated = CompileConditionalBlocks::compile (&expr, ctx, result);
+ }
+
+ void visit (HIR::IfExprConseqIf &expr) override
+ {
+ translated = CompileConditionalBlocks::compile (&expr, ctx, result);
+ }
+
+ // Empty visit for unused Expression HIR nodes.
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::ClosureExprInner &) override {}
+ void visit (HIR::ClosureExprInnerTyped &) override {}
+ void visit (HIR::StructExprFieldIdentifier &) override {}
+ void visit (HIR::StructExprFieldIdentifierValue &) override {}
+ void visit (HIR::StructExprFieldIndexValue &) override {}
+ void visit (HIR::StructExprStruct &) override {}
+ void visit (HIR::StructExprStructFields &) override {}
+ void visit (HIR::LiteralExpr &) override {}
+ void visit (HIR::BorrowExpr &) override {}
+ void visit (HIR::DereferenceExpr &) override {}
+ void visit (HIR::ErrorPropagationExpr &) override {}
+ void visit (HIR::NegationExpr &) override {}
+ void visit (HIR::ArithmeticOrLogicalExpr &) override {}
+ void visit (HIR::ComparisonExpr &) override {}
+ void visit (HIR::LazyBooleanExpr &) override {}
+ void visit (HIR::TypeCastExpr &) override {}
+ void visit (HIR::AssignmentExpr &) override {}
+ void visit (HIR::CompoundAssignmentExpr &) override {}
+ void visit (HIR::GroupedExpr &) override {}
+ void visit (HIR::ArrayExpr &) override {}
+ void visit (HIR::ArrayIndexExpr &) override {}
+ void visit (HIR::TupleExpr &) override {}
+ void visit (HIR::TupleIndexExpr &) override {}
+ void visit (HIR::CallExpr &) override {}
+ void visit (HIR::MethodCallExpr &) override {}
+ void visit (HIR::FieldAccessExpr &) override {}
+ void visit (HIR::BlockExpr &) override {}
+ void visit (HIR::ContinueExpr &) override {}
+ void visit (HIR::BreakExpr &) override {}
+ void visit (HIR::RangeFromToExpr &) override {}
+ void visit (HIR::RangeFromExpr &) override {}
+ void visit (HIR::RangeToExpr &) override {}
+ void visit (HIR::RangeFullExpr &) override {}
+ void visit (HIR::RangeFromToInclExpr &) override {}
+ void visit (HIR::RangeToInclExpr &) override {}
+ void visit (HIR::ReturnExpr &) override {}
+ void visit (HIR::UnsafeBlockExpr &) override {}
+ void visit (HIR::LoopExpr &) override {}
+ void visit (HIR::WhileLoopExpr &) override {}
+ void visit (HIR::WhileLetLoopExpr &) override {}
+ void visit (HIR::ForLoopExpr &) override {}
+ void visit (HIR::IfExprConseqIfLet &) override {}
+ void visit (HIR::IfLetExpr &) override {}
+ void visit (HIR::IfLetExprConseqElse &) override {}
+ void visit (HIR::IfLetExprConseqIf &) override {}
+ void visit (HIR::IfLetExprConseqIfLet &) override {}
+ void visit (HIR::MatchExpr &) override {}
+ void visit (HIR::AwaitExpr &) override {}
+ void visit (HIR::AsyncBlockExpr &) override {}
+
+private:
+ CompileExprWithBlock (Context *ctx, Bvariable *result)
+ : HIRCompileBase (ctx), translated (nullptr), result (result)
+ {}
+
+ tree translated;
+ Bvariable *result;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_BLOCK
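
CompileExprWithBlock is a thin dispatcher: it forwards the three if-expression variants to CompileConditionalBlocks and deliberately leaves translated as nullptr for every other ExprWithBlock node. A short usage sketch under the same interface assumptions as above (the surrounding statement-compilation context is illustrative, not taken from this patch):

    // Usage sketch (illustrative): compile an if-expression used only for its
    // side effects, so no result temporary is passed down.
    tree stmt = CompileExprWithBlock::compile (expr_with_block, ctx,
                                               nullptr /* result */);
    if (stmt != nullptr)
      ctx->add_statement (stmt);
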
diff --git a/gcc/rust/backend/rust-compile-expr.cc b/gcc/rust/backend/rust-compile-expr.cc
new file mode 100644
index 00000000000..8d239e92f45
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-expr.cc
@@ -0,0 +1,2769 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-expr.h"
+#include "rust-compile-struct-field-expr.h"
+#include "rust-hir-trait-resolve.h"
+#include "rust-hir-path-probe.h"
+#include "rust-hir-type-bounds.h"
+#include "rust-compile-pattern.h"
+#include "rust-compile-resolve-path.h"
+#include "rust-compile-block.h"
+#include "rust-compile-implitem.h"
+#include "rust-constexpr.h"
+
+#include "fold-const.h"
+#include "realmpfr.h"
+#include "convert.h"
+#include "print-tree.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileExpr::CompileExpr (Context *ctx)
+ : HIRCompileBase (ctx), translated (error_mark_node)
+{}
+
+tree
+CompileExpr::Compile (HIR::Expr *expr, Context *ctx)
+{
+ CompileExpr compiler (ctx);
+ expr->accept_vis (compiler);
+ return compiler.translated;
+}
+
+void
+CompileExpr::visit (HIR::TupleIndexExpr &expr)
+{
+ HIR::Expr *tuple_expr = expr.get_tuple_expr ().get ();
+ TupleIndex index = expr.get_tuple_index ();
+
+ tree receiver_ref = CompileExpr::Compile (tuple_expr, ctx);
+
+ TyTy::BaseType *tuple_expr_ty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (tuple_expr->get_mappings ().get_hirid (),
+ &tuple_expr_ty);
+ rust_assert (ok);
+
+ // do we need to add an indirect reference
+ if (tuple_expr_ty->get_kind () == TyTy::TypeKind::REF)
+ {
+ tree indirect = indirect_expression (receiver_ref, expr.get_locus ());
+ receiver_ref = indirect;
+ }
+
+ translated
+ = ctx->get_backend ()->struct_field_expression (receiver_ref, index,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::TupleExpr &expr)
+{
+ if (expr.is_unit ())
+ {
+ translated = ctx->get_backend ()->unit_expression ();
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this TupleExpr");
+ return;
+ }
+
+ tree tuple_type = TyTyResolveCompile::compile (ctx, tyty);
+ rust_assert (tuple_type != nullptr);
+
+ // this assumes all fields are in order from type resolution
+ std::vector<tree> vals;
+ for (auto &elem : expr.get_tuple_elems ())
+ {
+ auto e = CompileExpr::Compile (elem.get (), ctx);
+ vals.push_back (e);
+ }
+
+ translated
+ = ctx->get_backend ()->constructor_expression (tuple_type, false, vals, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::ReturnExpr &expr)
+{
+ auto fncontext = ctx->peek_fn ();
+
+ std::vector<tree> retstmts;
+ if (expr.has_return_expr ())
+ {
+ tree compiled_expr = CompileExpr::Compile (expr.return_expr.get (), ctx);
+ rust_assert (compiled_expr != nullptr);
+
+ retstmts.push_back (compiled_expr);
+ }
+
+ auto s = ctx->get_backend ()->return_statement (fncontext.fndecl, retstmts,
+ expr.get_locus ());
+ ctx->add_statement (s);
+}
+
+void
+CompileExpr::visit (HIR::ArithmeticOrLogicalExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx);
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type
+ = Analysis::RustLangItem::OperatorToLangItem (expr.get_expr_type ());
+ translated = resolve_operator_overload (lang_item_type, expr, lhs, rhs,
+ expr.get_lhs (), expr.get_rhs ());
+ return;
+ }
+
+ translated
+ = ctx->get_backend ()->arithmetic_or_logical_expression (op, lhs, rhs,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::CompoundAssignmentExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_left_expr ().get (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_right_expr ().get (), ctx);
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type
+ = Analysis::RustLangItem::CompoundAssignmentOperatorToLangItem (
+ expr.get_expr_type ());
+ auto compound_assignment
+ = resolve_operator_overload (lang_item_type, expr, lhs, rhs,
+ expr.get_left_expr ().get (),
+ expr.get_right_expr ().get ());
+ ctx->add_statement (compound_assignment);
+
+ return;
+ }
+
+ auto operator_expr
+ = ctx->get_backend ()->arithmetic_or_logical_expression (op, lhs, rhs,
+ expr.get_locus ());
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (lhs, operator_expr,
+ expr.get_locus ());
+ ctx->add_statement (assignment);
+}
+
+void
+CompileExpr::visit (HIR::NegationExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto negated_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+ auto location = expr.get_locus ();
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type
+ = Analysis::RustLangItem::NegationOperatorToLangItem (op);
+ translated
+ = resolve_operator_overload (lang_item_type, expr, negated_expr,
+ nullptr, expr.get_expr ().get (), nullptr);
+ return;
+ }
+
+ translated
+ = ctx->get_backend ()->negation_expression (op, negated_expr, location);
+}
+
+void
+CompileExpr::visit (HIR::ComparisonExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx);
+ auto location = expr.get_locus ();
+
+ translated
+ = ctx->get_backend ()->comparison_expression (op, lhs, rhs, location);
+}
+
+void
+CompileExpr::visit (HIR::LazyBooleanExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx);
+ auto location = expr.get_locus ();
+
+ translated
+ = ctx->get_backend ()->lazy_boolean_expression (op, lhs, rhs, location);
+}
+
+void
+CompileExpr::visit (HIR::TypeCastExpr &expr)
+{
+ TyTy::BaseType *type_to_cast_to_ty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &type_to_cast_to_ty))
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *casted_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_casted_expr ()->get_mappings ().get_hirid (), &casted_tyty))
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ auto type_to_cast_to = TyTyResolveCompile::compile (ctx, type_to_cast_to_ty);
+ auto casted_expr = CompileExpr::Compile (expr.get_casted_expr ().get (), ctx);
+
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_cast_autoderef_mappings (
+ expr.get_mappings ().get_hirid (), &adjustments);
+ if (ok)
+ {
+ casted_expr
+ = resolve_adjustements (*adjustments, casted_expr, expr.get_locus ());
+ }
+
+ translated
+ = type_cast_expression (type_to_cast_to, casted_expr, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::IfExpr &expr)
+{
+ auto stmt = CompileConditionalBlocks::compile (&expr, ctx, nullptr);
+ ctx->add_statement (stmt);
+}
+
+void
+CompileExpr::visit (HIR::IfExprConseqElse &expr)
+{
+ TyTy::BaseType *if_type = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &if_type))
+ {
+ rust_error_at (expr.get_locus (),
+ "failed to lookup type of IfExprConseqElse");
+ return;
+ }
+
+ Bvariable *tmp = NULL;
+ bool needs_temp = !if_type->is_unit ();
+ if (needs_temp)
+ {
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, if_type);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ auto stmt = CompileConditionalBlocks::compile (&expr, ctx, tmp);
+ ctx->add_statement (stmt);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::IfExprConseqIf &expr)
+{
+ TyTy::BaseType *if_type = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &if_type))
+ {
+ rust_error_at (expr.get_locus (),
+ "failed to lookup type of IfExprConseqElse");
+ return;
+ }
+
+ Bvariable *tmp = NULL;
+ bool needs_temp = !if_type->is_unit ();
+ if (needs_temp)
+ {
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, if_type);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ auto stmt = CompileConditionalBlocks::compile (&expr, ctx, tmp);
+ ctx->add_statement (stmt);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::BlockExpr &expr)
+{
+ TyTy::BaseType *block_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &block_tyty))
+ {
+ rust_error_at (expr.get_locus (), "failed to lookup type of BlockExpr");
+ return;
+ }
+
+ Bvariable *tmp = NULL;
+ bool needs_temp = !block_tyty->is_unit ();
+ if (needs_temp)
+ {
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, block_tyty);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ auto block_stmt = CompileBlock::compile (&expr, ctx, tmp);
+ rust_assert (TREE_CODE (block_stmt) == BIND_EXPR);
+ ctx->add_statement (block_stmt);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::UnsafeBlockExpr &expr)
+{
+ expr.get_block_expr ()->accept_vis (*this);
+}
+
+void
+CompileExpr::visit (HIR::StructExprStruct &struct_expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (struct_expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_error_at (struct_expr.get_locus (), "unknown type");
+ return;
+ }
+
+ rust_assert (tyty->is_unit ());
+ translated = ctx->get_backend ()->unit_expression ();
+}
+
+void
+CompileExpr::visit (HIR::StructExprStructFields &struct_expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (struct_expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_error_at (struct_expr.get_locus (), "unknown type");
+ return;
+ }
+
+ // it must be an ADT
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (tyty);
+
+ // what variant is it?
+ int union_discriminator = struct_expr.union_index;
+ TyTy::VariantDef *variant = nullptr;
+ if (!adt->is_enum ())
+ {
+ rust_assert (adt->number_of_variants () == 1);
+ variant = adt->get_variants ().at (0);
+ }
+ else
+ {
+ HirId variant_id;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ struct_expr.get_struct_name ().get_mappings ().get_hirid (),
+ &variant_id);
+ rust_assert (ok);
+
+ ok
+ = adt->lookup_variant_by_id (variant_id, &variant, &union_discriminator);
+ rust_assert (ok);
+ }
+
+ // compile it
+ tree compiled_adt_type = TyTyResolveCompile::compile (ctx, tyty);
+
+ std::vector<tree> arguments;
+ if (adt->is_union ())
+ {
+ rust_assert (struct_expr.get_fields ().size () == 1);
+
+ // assignments are coercion sites so let's convert the rvalue if
+ // necessary
+ auto respective_field = variant->get_field_at_index (union_discriminator);
+ auto expected = respective_field->get_field_type ();
+
+ // process arguments
+ auto &argument = struct_expr.get_fields ().at (0);
+ auto lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ auto rvalue_locus = argument->get_locus ();
+ auto rvalue = CompileStructExprField::Compile (argument.get (), ctx);
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+
+ if (ok)
+ {
+ rvalue
+ = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+ }
+
+ // add it to the list
+ arguments.push_back (rvalue);
+ }
+ else
+ {
+ // this assumes all fields are in order from type resolution and if a
+ // base struct was specified those fields are filled in via accessors
+ for (size_t i = 0; i < struct_expr.get_fields ().size (); i++)
+ {
+ // assignments are coercion sites so let's convert the rvalue if
+ // necessary
+ auto respective_field = variant->get_field_at_index (i);
+ auto expected = respective_field->get_field_type ();
+
+ // process arguments
+ auto &argument = struct_expr.get_fields ().at (i);
+ auto lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ auto rvalue_locus = argument->get_locus ();
+ auto rvalue = CompileStructExprField::Compile (argument.get (), ctx);
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+
+ // coerce it if required/possible see
+ // compile/torture/struct_base_init_1.rs
+ if (ok)
+ {
+ rvalue
+ = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+ }
+
+ // add it to the list
+ arguments.push_back (rvalue);
+ }
+ }
+
+ // the constructor depends on whether this is actually an enum or not; if
+ // it's an enum we need to set up the discriminator
+ std::vector<tree> ctor_arguments;
+ if (adt->is_enum ())
+ {
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree qualifier = folded_discrim_expr;
+
+ ctor_arguments.push_back (qualifier);
+ }
+ for (auto &arg : arguments)
+ ctor_arguments.push_back (arg);
+
+ translated = ctx->get_backend ()->constructor_expression (
+ compiled_adt_type, adt->is_enum (), ctor_arguments, union_discriminator,
+ struct_expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::GroupedExpr &expr)
+{
+ translated = CompileExpr::Compile (expr.get_expr_in_parens ().get (), ctx);
+}
+
+void
+CompileExpr::visit (HIR::FieldAccessExpr &expr)
+{
+ HIR::Expr *receiver_expr = expr.get_receiver_expr ().get ();
+ tree receiver_ref = CompileExpr::Compile (receiver_expr, ctx);
+
+ // resolve the receiver back to ADT type
+ TyTy::BaseType *receiver = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_receiver_expr ()->get_mappings ().get_hirid (), &receiver))
+ {
+ rust_error_at (expr.get_receiver_expr ()->get_locus (),
+ "unresolved type for receiver");
+ return;
+ }
+
+ size_t field_index = 0;
+ if (receiver->get_kind () == TyTy::TypeKind::ADT)
+ {
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (receiver);
+ rust_assert (!adt->is_enum ());
+ rust_assert (adt->number_of_variants () == 1);
+
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ bool ok
+ = variant->lookup_field (expr.get_field_name (), nullptr, &field_index);
+ rust_assert (ok);
+ }
+ else if (receiver->get_kind () == TyTy::TypeKind::REF)
+ {
+ TyTy::ReferenceType *r = static_cast<TyTy::ReferenceType *> (receiver);
+ TyTy::BaseType *b = r->get_base ();
+ rust_assert (b->get_kind () == TyTy::TypeKind::ADT);
+
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (b);
+ rust_assert (!adt->is_enum ());
+ rust_assert (adt->number_of_variants () == 1);
+
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ bool ok
+ = variant->lookup_field (expr.get_field_name (), nullptr, &field_index);
+ rust_assert (ok);
+
+ tree indirect = indirect_expression (receiver_ref, expr.get_locus ());
+ receiver_ref = indirect;
+ }
+
+ translated
+ = ctx->get_backend ()->struct_field_expression (receiver_ref, field_index,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::QualifiedPathInExpression &expr)
+{
+ translated = ResolvePathRef::Compile (expr, ctx);
+}
+
+void
+CompileExpr::visit (HIR::PathInExpression &expr)
+{
+ translated = ResolvePathRef::Compile (expr, ctx);
+}
+
+void
+CompileExpr::visit (HIR::LoopExpr &expr)
+{
+ TyTy::BaseType *block_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &block_tyty))
+ {
+ rust_error_at (expr.get_locus (), "failed to lookup type of BlockExpr");
+ return;
+ }
+
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, block_tyty);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = NULL_TREE;
+ Bvariable *tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ ctx->push_loop_context (tmp);
+
+ if (expr.has_loop_label ())
+ {
+ HIR::LoopLabel &loop_label = expr.get_loop_label ();
+ tree label
+ = ctx->get_backend ()->label (fnctx.fndecl,
+ loop_label.get_lifetime ().get_name (),
+ loop_label.get_locus ());
+ tree label_decl = ctx->get_backend ()->label_definition_statement (label);
+ ctx->add_statement (label_decl);
+ ctx->insert_label_decl (
+ loop_label.get_lifetime ().get_mappings ().get_hirid (), label);
+ }
+
+ tree loop_begin_label
+ = ctx->get_backend ()->label (fnctx.fndecl, "", expr.get_locus ());
+ tree loop_begin_label_decl
+ = ctx->get_backend ()->label_definition_statement (loop_begin_label);
+ ctx->add_statement (loop_begin_label_decl);
+ ctx->push_loop_begin_label (loop_begin_label);
+
+ tree code_block
+ = CompileBlock::compile (expr.get_loop_block ().get (), ctx, nullptr);
+ tree loop_expr
+ = ctx->get_backend ()->loop_expression (code_block, expr.get_locus ());
+ ctx->add_statement (loop_expr);
+
+ ctx->pop_loop_context ();
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+
+ ctx->pop_loop_begin_label ();
+}
+
+void
+CompileExpr::visit (HIR::WhileLoopExpr &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ if (expr.has_loop_label ())
+ {
+ HIR::LoopLabel &loop_label = expr.get_loop_label ();
+ tree label
+ = ctx->get_backend ()->label (fnctx.fndecl,
+ loop_label.get_lifetime ().get_name (),
+ loop_label.get_locus ());
+ tree label_decl = ctx->get_backend ()->label_definition_statement (label);
+ ctx->add_statement (label_decl);
+ ctx->insert_label_decl (
+ loop_label.get_lifetime ().get_mappings ().get_hirid (), label);
+ }
+
+ std::vector<Bvariable *> locals;
+ Location start_location = expr.get_loop_block ()->get_locus ();
+ Location end_location = expr.get_loop_block ()->get_locus (); // FIXME
+
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree loop_block
+ = ctx->get_backend ()->block (fnctx.fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (loop_block);
+
+ tree loop_begin_label
+ = ctx->get_backend ()->label (fnctx.fndecl, "", expr.get_locus ());
+ tree loop_begin_label_decl
+ = ctx->get_backend ()->label_definition_statement (loop_begin_label);
+ ctx->add_statement (loop_begin_label_decl);
+ ctx->push_loop_begin_label (loop_begin_label);
+
+ tree condition
+ = CompileExpr::Compile (expr.get_predicate_expr ().get (), ctx);
+ tree exit_expr
+ = ctx->get_backend ()->exit_expression (condition, expr.get_locus ());
+ ctx->add_statement (exit_expr);
+
+ tree code_block_stmt
+ = CompileBlock::compile (expr.get_loop_block ().get (), ctx, nullptr);
+ rust_assert (TREE_CODE (code_block_stmt) == BIND_EXPR);
+ ctx->add_statement (code_block_stmt);
+
+ ctx->pop_loop_begin_label ();
+ ctx->pop_block ();
+
+ tree loop_expr
+ = ctx->get_backend ()->loop_expression (loop_block, expr.get_locus ());
+ ctx->add_statement (loop_expr);
+}
+
+void
+CompileExpr::visit (HIR::BreakExpr &expr)
+{
+ if (expr.has_break_expr ())
+ {
+ tree compiled_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+
+ Bvariable *loop_result_holder = ctx->peek_loop_context ();
+ tree result_reference
+ = ctx->get_backend ()->var_expression (loop_result_holder,
+ expr.get_expr ()->get_locus ());
+
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (result_reference,
+ compiled_expr,
+ expr.get_locus ());
+ ctx->add_statement (assignment);
+ }
+
+ if (expr.has_label ())
+ {
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_label (
+ expr.get_label ().get_mappings ().get_nodeid (), &resolved_node_id))
+ {
+ rust_error_at (
+ expr.get_label ().get_locus (),
+ "failed to resolve compiled label for label %s",
+ expr.get_label ().get_mappings ().as_string ().c_str ());
+ return;
+ }
+
+ HirId ref = UNKNOWN_HIRID;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref))
+ {
+ rust_fatal_error (expr.get_locus (), "reverse lookup label failure");
+ return;
+ }
+
+ tree label = NULL_TREE;
+ if (!ctx->lookup_label_decl (ref, &label))
+ {
+ rust_error_at (expr.get_label ().get_locus (),
+ "failed to lookup compiled label");
+ return;
+ }
+
+ tree goto_label
+ = ctx->get_backend ()->goto_statement (label, expr.get_locus ());
+ ctx->add_statement (goto_label);
+ }
+ else
+ {
+ tree exit_expr = ctx->get_backend ()->exit_expression (
+ ctx->get_backend ()->boolean_constant_expression (true),
+ expr.get_locus ());
+ ctx->add_statement (exit_expr);
+ }
+}
+
+void
+CompileExpr::visit (HIR::ContinueExpr &expr)
+{
+ tree label = ctx->peek_loop_begin_label ();
+ if (expr.has_label ())
+ {
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_label (
+ expr.get_label ().get_mappings ().get_nodeid (), &resolved_node_id))
+ {
+ rust_error_at (
+ expr.get_label ().get_locus (),
+ "failed to resolve compiled label for label %s",
+ expr.get_label ().get_mappings ().as_string ().c_str ());
+ return;
+ }
+
+ HirId ref = UNKNOWN_HIRID;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref))
+ {
+ rust_fatal_error (expr.get_locus (), "reverse lookup label failure");
+ return;
+ }
+
+ if (!ctx->lookup_label_decl (ref, &label))
+ {
+ rust_error_at (expr.get_label ().get_locus (),
+ "failed to lookup compiled label");
+ return;
+ }
+ }
+
+ translated = ctx->get_backend ()->goto_statement (label, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::BorrowExpr &expr)
+{
+ tree main_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+ if (SLICE_TYPE_P (TREE_TYPE (main_expr)))
+ {
+ translated = main_expr;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ return;
+
+ translated = address_expression (main_expr, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::DereferenceExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this TupleExpr");
+ return;
+ }
+
+ tree main_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type = Analysis::RustLangItem::ItemType::DEREF;
+ tree operator_overload_call
+ = resolve_operator_overload (lang_item_type, expr, main_expr, nullptr,
+ expr.get_expr ().get (), nullptr);
+
+ // the rust Deref overload always returns a reference, so we can then
+ // do the actual indirection
+ main_expr = operator_overload_call;
+ }
+
+ tree expected_type = TyTyResolveCompile::compile (ctx, tyty);
+ if (SLICE_TYPE_P (TREE_TYPE (main_expr)) && SLICE_TYPE_P (expected_type))
+ {
+ translated = main_expr;
+ return;
+ }
+
+ translated = indirect_expression (main_expr, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::LiteralExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ return;
+
+ switch (expr.get_lit_type ())
+ {
+ case HIR::Literal::BOOL:
+ translated = compile_bool_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::INT:
+ translated = compile_integer_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::FLOAT:
+ translated = compile_float_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::CHAR:
+ translated = compile_char_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::BYTE:
+ translated = compile_byte_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::STRING:
+ translated = compile_string_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::BYTE_STRING:
+ translated = compile_byte_string_literal (expr, tyty);
+ return;
+ }
+}
+
+void
+CompileExpr::visit (HIR::AssignmentExpr &expr)
+{
+ auto lvalue = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rvalue = CompileExpr::Compile (expr.get_rhs (), ctx);
+
+ // assignments are coercion sites so let's convert the rvalue if necessary
+ TyTy::BaseType *expected = nullptr;
+ TyTy::BaseType *actual = nullptr;
+
+ bool ok;
+ ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_lhs ()->get_mappings ().get_hirid (), &expected);
+ rust_assert (ok);
+
+ ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_rhs ()->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ rvalue = coercion_site (expr.get_mappings ().get_hirid (), rvalue, actual,
+ expected, expr.get_lhs ()->get_locus (),
+ expr.get_rhs ()->get_locus ());
+
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (lvalue, rvalue,
+ expr.get_locus ());
+
+ ctx->add_statement (assignment);
+}
+
+// Helper for sort_tuple_patterns.
+// Determine whether Patterns a and b are really the same pattern.
+// FIXME: This is a nasty hack to avoid properly implementing a comparison
+// for Patterns, which we really probably do want at some point.
+static bool
+patterns_mergeable (HIR::Pattern *a, HIR::Pattern *b)
+{
+ if (!a || !b)
+ return false;
+
+ HIR::Pattern::PatternType pat_type = a->get_pattern_type ();
+ if (b->get_pattern_type () != pat_type)
+ return false;
+
+ switch (pat_type)
+ {
+ case HIR::Pattern::PatternType::PATH: {
+ // FIXME: this is far too naive
+ HIR::PathPattern &aref = *static_cast<HIR::PathPattern *> (a);
+ HIR::PathPattern &bref = *static_cast<HIR::PathPattern *> (b);
+ if (aref.get_num_segments () != bref.get_num_segments ())
+ return false;
+
+ const auto &asegs = aref.get_segments ();
+ const auto &bsegs = bref.get_segments ();
+ for (size_t i = 0; i < asegs.size (); i++)
+ {
+ if (asegs[i].as_string () != bsegs[i].as_string ())
+ return false;
+ }
+ return true;
+ }
+ break;
+ case HIR::Pattern::PatternType::LITERAL: {
+ HIR::LiteralPattern &aref = *static_cast<HIR::LiteralPattern *> (a);
+ HIR::LiteralPattern &bref = *static_cast<HIR::LiteralPattern *> (b);
+ return aref.get_literal ().is_equal (bref.get_literal ());
+ }
+ break;
+ case HIR::Pattern::PatternType::IDENTIFIER: {
+ // TODO
+ }
+ break;
+ case HIR::Pattern::PatternType::WILDCARD:
+ return true;
+ break;
+
+ // TODO
+
+ default:;
+ }
+ return false;
+}
+
+// A little container for rearranging the patterns and cases in a match
+// expression while simplifying.
+struct PatternMerge
+{
+ std::unique_ptr<HIR::MatchCase> wildcard;
+ std::vector<std::unique_ptr<HIR::Pattern>> heads;
+ std::vector<std::vector<HIR::MatchCase>> cases;
+};
+
+// Helper for simplify_tuple_match.
+// For each tuple pattern in a given match, pull out the first elt of the
+// tuple and construct a new MatchCase with the remaining tuple elts as the
+// pattern. Return a mapping from each _unique_ first tuple element to a
+// vec of cases for a new match.
+//
+// FIXME: This used to be a std::map<Pattern, Vec<MatchCase>>, but it doesn't
+// actually work like we want - the Pattern includes an HIR ID, which is unique
+// per Pattern object. This means we don't have a good means for comparing
+// Patterns. It would probably be best to actually implement a means of
+// properly comparing patterns, and then use an actual map.
+//
+static struct PatternMerge
+sort_tuple_patterns (HIR::MatchExpr &expr)
+{
+ rust_assert (expr.get_scrutinee_expr ()->get_expression_type ()
+ == HIR::Expr::ExprType::Tuple);
+
+ struct PatternMerge result;
+ result.wildcard = nullptr;
+ result.heads = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ result.cases = std::vector<std::vector<HIR::MatchCase>> ();
+
+ for (auto &match_case : expr.get_match_cases ())
+ {
+ HIR::MatchArm &case_arm = match_case.get_arm ();
+
+ // FIXME: Note we are only dealing with the first pattern in the arm.
+ // The patterns vector in the arm might hold many patterns, which are the
+ // patterns separated by the '|' token. Rustc abstracts these as "Or"
+ // patterns, and part of its simplification process is to get rid of them.
+ // We should get rid of the ORs too, maybe here or earlier than here?
+ auto pat = case_arm.get_patterns ()[0]->clone_pattern ();
+
+ // Record wildcards so we can add them in inner matches.
+ if (pat->get_pattern_type () == HIR::Pattern::PatternType::WILDCARD)
+ {
+ // The *whole* pattern is a wild card (_).
+ result.wildcard
+ = std::unique_ptr<HIR::MatchCase> (new HIR::MatchCase (match_case));
+ continue;
+ }
+
+ rust_assert (pat->get_pattern_type ()
+ == HIR::Pattern::PatternType::TUPLE);
+
+ auto ref = *static_cast<HIR::TuplePattern *> (pat.get ());
+
+ rust_assert (ref.has_tuple_pattern_items ());
+
+ auto items
+ = HIR::TuplePattern (ref).get_items ()->clone_tuple_pattern_items ();
+ if (items->get_pattern_type ()
+ == HIR::TuplePatternItems::TuplePatternItemType::MULTIPLE)
+ {
+ auto items_ref
+ = *static_cast<HIR::TuplePatternItemsMultiple *> (items.get ());
+
+ // Pop the first pattern out
+ auto patterns = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ auto first = items_ref.get_patterns ()[0]->clone_pattern ();
+ for (auto p = items_ref.get_patterns ().begin () + 1;
+ p != items_ref.get_patterns ().end (); p++)
+ {
+ patterns.push_back ((*p)->clone_pattern ());
+ }
+
+ // if there is only one pattern left, don't make a tuple out of it
+ std::unique_ptr<HIR::Pattern> result_pattern;
+ if (patterns.size () == 1)
+ {
+ result_pattern = std::move (patterns[0]);
+ }
+ else
+ {
+ auto new_items = std::unique_ptr<HIR::TuplePatternItems> (
+ new HIR::TuplePatternItemsMultiple (std::move (patterns)));
+
+ // Construct a TuplePattern from the rest of the patterns
+ result_pattern = std::unique_ptr<HIR::Pattern> (
+ new HIR::TuplePattern (ref.get_pattern_mappings (),
+ std::move (new_items),
+ ref.get_locus ()));
+ }
+
+ // I don't know why we need to make foo separately here but
+ // using the { new_tuple } syntax in new_arm constructor does not
+ // compile.
+ auto foo = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ foo.emplace_back (std::move (result_pattern));
+ HIR::MatchArm new_arm (std::move (foo), Location (), nullptr,
+ AST::AttrVec ());
+
+ HIR::MatchCase new_case (match_case.get_mappings (), new_arm,
+ match_case.get_expr ()->clone_expr ());
+
+ bool pushed = false;
+ for (size_t i = 0; i < result.heads.size (); i++)
+ {
+ if (patterns_mergeable (result.heads[i].get (), first.get ()))
+ {
+ result.cases[i].push_back (new_case);
+ pushed = true;
+ }
+ }
+
+ if (!pushed)
+ {
+ result.heads.push_back (std::move (first));
+ result.cases.push_back ({new_case});
+ }
+ }
+ else /* TuplePatternItemType::RANGED */
+ {
+ // FIXME
+ gcc_unreachable ();
+ }
+ }
+
+ return result;
+}
+
+// Helper for CompileExpr::visit (HIR::MatchExpr).
+// Given a MatchExpr where the scrutinee is some kind of tuple, build an
+// equivalent match where only one element of the tuple is examined at a time.
+// This resulting match can then be lowered to a SWITCH_EXPR tree directly.
+//
+// The approach is as follows:
+// 1. Split the scrutinee and each pattern into the first (head) and the
+// rest (tail).
+// 2. Build a mapping of unique pattern heads to the cases (tail and expr)
+// that shared that pattern head in the original match.
+// (This is the job of sort_tuple_patterns ()).
+// 3. For each unique pattern head, build a new MatchCase where the pattern
+// is the unique head, and the expression is a new match where:
+// - The scrutinee is the tail of the original scrutinee
+// - The cases are those built by the mapping in step 2, i.e. the
+// tails of the patterns and the corresponding expressions from the
+// original match expression.
+// 4. Do this recursively for each inner match, until there is nothing more
+// to simplify.
+// 5. Build the resulting match which scrutinizes the head of the original
+// scrutinee, using the cases built in step 3.
+static HIR::MatchExpr
+simplify_tuple_match (HIR::MatchExpr &expr)
+{
+ if (expr.get_scrutinee_expr ()->get_expression_type ()
+ != HIR::Expr::ExprType::Tuple)
+ return expr;
+
+ auto ref = *static_cast<HIR::TupleExpr *> (expr.get_scrutinee_expr ().get ());
+
+ auto &tail = ref.get_tuple_elems ();
+ rust_assert (tail.size () > 1);
+
+ auto head = std::move (tail[0]);
+ tail.erase (tail.begin (), tail.begin () + 1);
+
+ // e.g.
+ // match (tupA, tupB, tupC) {
+ // (a1, b1, c1) => { blk1 },
+ // (a2, b2, c2) => { blk2 },
+ // (a1, b3, c3) => { blk3 },
+ // }
+ // tail = (tupB, tupC)
+ // head = tupA
+
+ // Make sure the tail is only a tuple if it consists of at least 2 elements.
+ std::unique_ptr<HIR::Expr> remaining;
+ if (tail.size () == 1)
+ remaining = std::move (tail[0]);
+ else
+ remaining = std::unique_ptr<HIR::Expr> (
+ new HIR::TupleExpr (ref.get_mappings (), std::move (tail),
+ AST::AttrVec (), ref.get_outer_attrs (),
+ ref.get_locus ()));
+
+ // e.g.
+ // a1 -> [(b1, c1) => { blk1 },
+ // (b3, c3) => { blk3 }]
+ // a2 -> [(b2, c2) => { blk2 }]
+ struct PatternMerge map = sort_tuple_patterns (expr);
+
+ std::vector<HIR::MatchCase> cases;
+ // Construct the inner match for each unique first elt of the tuple
+ // patterns
+ for (size_t i = 0; i < map.heads.size (); i++)
+ {
+ auto inner_match_cases = map.cases[i];
+
+ // If there is a wildcard at the outer match level, then we need to
+ // propagate the wildcard case into *every* inner match.
+ // FIXME: It is probably not correct to add this unconditionally, what if
+ // we have a pattern like (a, _, c)? Then there is already a wildcard in
+ // the inner matches, and having two will cause two 'default:' blocks
+ // which is an error.
+ if (map.wildcard != nullptr)
+ {
+ inner_match_cases.push_back (*(map.wildcard.get ()));
+ }
+
+ // match (tupB, tupC) {
+ // (b1, c1) => { blk1 },
+ // (b3, c3) => { blk3 }
+ // }
+ HIR::MatchExpr inner_match (expr.get_mappings (),
+ remaining->clone_expr (), inner_match_cases,
+ AST::AttrVec (), expr.get_outer_attrs (),
+ expr.get_locus ());
+
+ inner_match = simplify_tuple_match (inner_match);
+
+ auto outer_arm_pat = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ outer_arm_pat.emplace_back (map.heads[i]->clone_pattern ());
+
+ HIR::MatchArm outer_arm (std::move (outer_arm_pat), expr.get_locus ());
+
+ // Need to move the inner match to the heap and put it in a unique_ptr to
+ // build the actual match case of the outer expression
+ // auto inner_expr = std::unique_ptr<HIR::Expr> (new HIR::MatchExpr
+ // (inner_match));
+ auto inner_expr = inner_match.clone_expr ();
+
+ // a1 => match (tupB, tupC) { ... }
+ HIR::MatchCase outer_case (expr.get_mappings (), outer_arm,
+ std::move (inner_expr));
+
+ cases.push_back (outer_case);
+ }
+
+ // If there was a wildcard, make sure to include it at the outer match level
+ // too.
+ if (map.wildcard != nullptr)
+ {
+ cases.push_back (*(map.wildcard.get ()));
+ }
+
+ // match tupA {
+ // a1 => match (tupB, tupC) {
+ // (b1, c1) => { blk1 },
+ // (b3, c3) => { blk3 }
+ // }
+ // a2 => match (tupB, tupC) {
+ // (b2, c2) => { blk2 }
+ // }
+ // }
+ HIR::MatchExpr outer_match (expr.get_mappings (), std::move (head), cases,
+ AST::AttrVec (), expr.get_outer_attrs (),
+ expr.get_locus ());
+
+ return outer_match;
+}
+
+// Helper for CompileExpr::visit (HIR::MatchExpr).
+// Check that the scrutinee of EXPR is a valid kind of expression to match on.
+// Return the TypeKind of the scrutinee if it is valid, or TyTy::TypeKind::ERROR
+// if not.
+static TyTy::TypeKind
+check_match_scrutinee (HIR::MatchExpr &expr, Context *ctx)
+{
+ TyTy::BaseType *scrutinee_expr_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_scrutinee_expr ()->get_mappings ().get_hirid (),
+ &scrutinee_expr_tyty))
+ {
+ return TyTy::TypeKind::ERROR;
+ }
+
+ TyTy::TypeKind scrutinee_kind = scrutinee_expr_tyty->get_kind ();
+ rust_assert ((TyTy::is_primitive_type_kind (scrutinee_kind)
+ && scrutinee_kind != TyTy::TypeKind::NEVER)
+ || scrutinee_kind == TyTy::TypeKind::ADT
+ || scrutinee_kind == TyTy::TypeKind::TUPLE);
+
+ if (scrutinee_kind == TyTy::TypeKind::ADT)
+ {
+ // this will need to change, but for now, for the first-pass
+ // implementation, let's assert this is the case
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (scrutinee_expr_tyty);
+ rust_assert (adt->is_enum ());
+ rust_assert (adt->number_of_variants () > 0);
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::FLOAT)
+ {
+ // FIXME: CASE_LABEL_EXPR does not support floating point types.
+ // Find another way to compile these.
+ rust_sorry_at (expr.get_locus (),
+ "match on floating-point types is not yet supported");
+ }
+
+ TyTy::BaseType *expr_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &expr_tyty))
+ {
+ return TyTy::TypeKind::ERROR;
+ }
+
+ return scrutinee_kind;
+}
+
+void
+CompileExpr::visit (HIR::MatchExpr &expr)
+{
+ // https://gcc.gnu.org/onlinedocs/gccint/Basic-Statements.html#Basic-Statements
+ // TODO
+ // SWITCH_ALL_CASES_P is true if the switch includes a default label or the
+ // case label ranges cover all possible values of the condition expression
+
+ /* Switch expression.
+
+ TREE_TYPE is the original type of the condition, before any
+ language required type conversions. It may be NULL, in which case
+ the original type and final types are assumed to be the same.
+
+ Operand 0 is the expression used to perform the branch,
+ Operand 1 is the body of the switch, which probably contains
+ CASE_LABEL_EXPRs. It may also be NULL, in which case operand 2
+ must not be NULL. */
+ // DEFTREECODE (SWITCH_EXPR, "switch_expr", tcc_statement, 2)
+
+ /* Used to represent a case label.
+
+ Operand 0 is CASE_LOW. It may be NULL_TREE, in which case the label
+ is a 'default' label.
+ Operand 1 is CASE_HIGH. If it is NULL_TREE, the label is a simple
+ (one-value) case label. If it is non-NULL_TREE, the case is a range.
+ Operand 2 is CASE_LABEL, which has the corresponding LABEL_DECL.
+ Operand 3 is CASE_CHAIN. This operand is only used in tree-cfg.cc to
+ speed up the lookup of case labels which use a particular edge in
+ the control flow graph. */
+ // DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", tcc_statement, 4)
+
+ TyTy::TypeKind scrutinee_kind = check_match_scrutinee (expr, ctx);
+ if (scrutinee_kind == TyTy::TypeKind::ERROR)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *expr_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &expr_tyty))
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ fncontext fnctx = ctx->peek_fn ();
+ Bvariable *tmp = NULL;
+ bool needs_temp = !expr_tyty->is_unit ();
+ if (needs_temp)
+ {
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, expr_tyty);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ // lets compile the scrutinee expression
+ tree match_scrutinee_expr
+ = CompileExpr::Compile (expr.get_scrutinee_expr ().get (), ctx);
+
+ tree match_scrutinee_expr_qualifier_expr;
+ if (TyTy::is_primitive_type_kind (scrutinee_kind))
+ {
+ match_scrutinee_expr_qualifier_expr = match_scrutinee_expr;
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::ADT)
+ {
+      // We need to access the qualifier field; if we used QUAL_UNION_TYPE this
+      // would be DECL_QUALIFIER, I think. For now this just accesses the first
+      // record field and its respective qualifier, which is always set because
+      // this is all one big special union.
+ tree scrutinee_first_record_expr
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, 0, expr.get_scrutinee_expr ()->get_locus ());
+ match_scrutinee_expr_qualifier_expr
+ = ctx->get_backend ()->struct_field_expression (
+ scrutinee_first_record_expr, 0,
+ expr.get_scrutinee_expr ()->get_locus ());
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::TUPLE)
+ {
+ // match on tuple becomes a series of nested switches, with one level
+ // for each element of the tuple from left to right.
+ auto exprtype = expr.get_scrutinee_expr ()->get_expression_type ();
+ switch (exprtype)
+ {
+ case HIR::Expr::ExprType::Tuple: {
+ // Build an equivalent expression which is nicer to lower.
+ HIR::MatchExpr outer_match = simplify_tuple_match (expr);
+
+ // We've rearranged the match into something that lowers better
+ // to GENERIC trees.
+ // For actually doing the lowering we need to compile the match
+ // we've just made. But we're half-way through compiling the
+ // original one.
+ // ...
+ // For now, let's just replace the original with the rearranged one
+ // we just made, and compile that instead. What could go wrong? :)
+ //
+ // FIXME: What about when we decide a temporary is needed above?
+ // We might have already pushed a statement for it that
+ // we no longer need. Probably need to rearrange the order
+ // of these steps.
+ expr = outer_match;
+
+ scrutinee_kind = check_match_scrutinee (expr, ctx);
+ if (scrutinee_kind == TyTy::TypeKind::ERROR)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ // Now compile the scrutinee of the simplified match.
+ // FIXME: this part is duplicated from above.
+ match_scrutinee_expr
+ = CompileExpr::Compile (expr.get_scrutinee_expr ().get (), ctx);
+
+ if (TyTy::is_primitive_type_kind (scrutinee_kind))
+ {
+ match_scrutinee_expr_qualifier_expr = match_scrutinee_expr;
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::ADT)
+ {
+		  // We need to access the qualifier field; if we used
+		  // QUAL_UNION_TYPE this would be DECL_QUALIFIER, I think. For
+		  // now this just accesses the first record field and its
+		  // respective qualifier, which is always set because this is
+		  // all one big special union.
+ tree scrutinee_first_record_expr
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, 0,
+ expr.get_scrutinee_expr ()->get_locus ());
+ match_scrutinee_expr_qualifier_expr
+ = ctx->get_backend ()->struct_field_expression (
+ scrutinee_first_record_expr, 0,
+ expr.get_scrutinee_expr ()->get_locus ());
+ }
+ else
+ {
+ // FIXME: There are other cases, but it better not be a Tuple
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case HIR::Expr::ExprType::Path: {
+ // FIXME
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ // FIXME: match on other types of expressions not yet implemented.
+ gcc_unreachable ();
+ }
+
+ // setup the end label so the cases can exit properly
+ tree fndecl = fnctx.fndecl;
+ Location end_label_locus = expr.get_locus (); // FIXME
+ tree end_label
+ = ctx->get_backend ()->label (fndecl,
+ "" /* empty creates an artificial label */,
+ end_label_locus);
+ tree end_label_decl_statement
+ = ctx->get_backend ()->label_definition_statement (end_label);
+
+ // setup the switch-body-block
+ Location start_location; // FIXME
+ Location end_location; // FIXME
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree switch_body_block
+ = ctx->get_backend ()->block (fndecl, enclosing_scope, {}, start_location,
+ end_location);
+ ctx->push_block (switch_body_block);
+
+ for (auto &kase : expr.get_match_cases ())
+ {
+      // for now let's just get single patterns working
+ HIR::MatchArm &kase_arm = kase.get_arm ();
+ rust_assert (kase_arm.get_patterns ().size () > 0);
+
+ // generate implicit label
+ Location arm_locus = kase_arm.get_locus ();
+ tree case_label = ctx->get_backend ()->label (
+ fndecl, "" /* empty creates an artificial label */, arm_locus);
+
+ // setup the bindings for the block
+ for (auto &kase_pattern : kase_arm.get_patterns ())
+ {
+ tree switch_kase_expr
+ = CompilePatternCaseLabelExpr::Compile (kase_pattern.get (),
+ case_label, ctx);
+ ctx->add_statement (switch_kase_expr);
+
+ CompilePatternBindings::Compile (kase_pattern.get (),
+ match_scrutinee_expr, ctx);
+ }
+
+ // compile the expr and setup the assignment if required when tmp != NULL
+ tree kase_expr_tree = CompileExpr::Compile (kase.get_expr ().get (), ctx);
+ if (tmp != NULL)
+ {
+ tree result_reference
+ = ctx->get_backend ()->var_expression (tmp, arm_locus);
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (result_reference,
+ kase_expr_tree,
+ arm_locus);
+ ctx->add_statement (assignment);
+ }
+
+ // go to end label
+ tree goto_end_label = build1_loc (arm_locus.gcc_location (), GOTO_EXPR,
+ void_type_node, end_label);
+ ctx->add_statement (goto_end_label);
+ }
+
+ // setup the switch expression
+ tree match_body = ctx->pop_block ();
+ tree match_expr_stmt
+ = build2_loc (expr.get_locus ().gcc_location (), SWITCH_EXPR,
+ TREE_TYPE (match_scrutinee_expr_qualifier_expr),
+ match_scrutinee_expr_qualifier_expr, match_body);
+ ctx->add_statement (match_expr_stmt);
+ ctx->add_statement (end_label_decl_statement);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
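+// Lower a HIR call expression. A call is either an ADT constructor, e.g. an
+// enum variant or tuple-struct like `Some (1)`, which becomes a GENERIC
+// constructor expression (with the variant discriminant prepended for enums),
+// or a plain function call, which becomes a CALL_EXPR with each argument
+// coerced to its respective parameter type.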
+void
+CompileExpr::visit (HIR::CallExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_fnexpr ()->get_mappings ().get_hirid (), &tyty))
+ {
+ rust_error_at (expr.get_locus (), "unknown type");
+ return;
+ }
+
+  // if this is not a call to a fn-item it must be a tuple-struct or
+  // enum-variant constructor
+ bool is_fn = tyty->get_kind () == TyTy::TypeKind::FNDEF
+ || tyty->get_kind () == TyTy::TypeKind::FNPTR;
+ bool is_adt_ctor = !is_fn;
+ if (is_adt_ctor)
+ {
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (tyty);
+ tree compiled_adt_type = TyTyResolveCompile::compile (ctx, tyty);
+
+ // what variant is it?
+ int union_disriminator = -1;
+ TyTy::VariantDef *variant = nullptr;
+ if (!adt->is_enum ())
+ {
+ rust_assert (adt->number_of_variants () == 1);
+ variant = adt->get_variants ().at (0);
+ }
+ else
+ {
+ HirId variant_id;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ expr.get_fnexpr ()->get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ ok = adt->lookup_variant_by_id (variant_id, &variant,
+ &union_disriminator);
+ rust_assert (ok);
+ }
+
+      // this assumes all fields are in order from type resolution, and if a
+      // base struct was specified those fields are filled in via accessors
+ std::vector<tree> arguments;
+ for (size_t i = 0; i < expr.get_arguments ().size (); i++)
+ {
+ auto &argument = expr.get_arguments ().at (i);
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+
+ // assignments are coercion sites so lets convert the rvalue if
+ // necessary
+ auto respective_field = variant->get_field_at_index (i);
+ auto expected = respective_field->get_field_type ();
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ // coerce it if required
+ Location lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ Location rvalue_locus = argument->get_locus ();
+ rvalue
+ = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+
+ // add it to the list
+ arguments.push_back (rvalue);
+ }
+
+      // the constructor depends on whether this is actually an enum or not;
+      // if it's an enum we need to set up the discriminator
+ std::vector<tree> ctor_arguments;
+ if (adt->is_enum ())
+ {
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree qualifier = folded_discrim_expr;
+
+ ctor_arguments.push_back (qualifier);
+ }
+ for (auto &arg : arguments)
+ ctor_arguments.push_back (arg);
+
+ translated = ctx->get_backend ()->constructor_expression (
+ compiled_adt_type, adt->is_enum (), ctor_arguments, union_disriminator,
+ expr.get_locus ());
+
+ return;
+ }
+
+ auto get_parameter_tyty_at_index
+ = [] (const TyTy::BaseType *base, size_t index,
+ TyTy::BaseType **result) -> bool {
+ bool is_fn = base->get_kind () == TyTy::TypeKind::FNDEF
+ || base->get_kind () == TyTy::TypeKind::FNPTR;
+ rust_assert (is_fn);
+
+ if (base->get_kind () == TyTy::TypeKind::FNPTR)
+ {
+ const TyTy::FnPtr *fn = static_cast<const TyTy::FnPtr *> (base);
+ *result = fn->param_at (index);
+
+ return true;
+ }
+
+ const TyTy::FnType *fn = static_cast<const TyTy::FnType *> (base);
+ auto param = fn->param_at (index);
+ *result = param.second;
+
+ return true;
+ };
+
+ bool is_varadic = false;
+ if (tyty->get_kind () == TyTy::TypeKind::FNDEF)
+ {
+ const TyTy::FnType *fn = static_cast<const TyTy::FnType *> (tyty);
+ is_varadic = fn->is_varadic ();
+ }
+
+ size_t required_num_args;
+ if (tyty->get_kind () == TyTy::TypeKind::FNDEF)
+ {
+ const TyTy::FnType *fn = static_cast<const TyTy::FnType *> (tyty);
+ required_num_args = fn->num_params ();
+ }
+ else
+ {
+ const TyTy::FnPtr *fn = static_cast<const TyTy::FnPtr *> (tyty);
+ required_num_args = fn->num_params ();
+ }
+
+ std::vector<tree> args;
+ for (size_t i = 0; i < expr.get_arguments ().size (); i++)
+ {
+ auto &argument = expr.get_arguments ().at (i);
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+
+ if (is_varadic && i >= required_num_args)
+ {
+ args.push_back (rvalue);
+ continue;
+ }
+
+ // assignments are coercion sites so lets convert the rvalue if
+ // necessary
+ bool ok;
+ TyTy::BaseType *expected = nullptr;
+ ok = get_parameter_tyty_at_index (tyty, i, &expected);
+ rust_assert (ok);
+
+ TyTy::BaseType *actual = nullptr;
+ ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ // coerce it if required
+ Location lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ Location rvalue_locus = argument->get_locus ();
+ rvalue = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+
+ // add it to the list
+ args.push_back (rvalue);
+ }
+
+ // must be a call to a function
+ auto fn_address = CompileExpr::Compile (expr.get_fnexpr (), ctx);
+ translated = ctx->get_backend ()->call_expression (fn_address, args, nullptr,
+ expr.get_locus ());
+}
+
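+// Lower a HIR method call such as `receiver.method (arg)`: compile the
+// receiver, resolve the callee either through the vtable for `dyn Trait`
+// receivers or to a statically known address, apply the autoderef adjustments
+// recorded during type checking to the receiver, and emit a CALL_EXPR with
+// the adjusted receiver as the first argument.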
+void
+CompileExpr::visit (HIR::MethodCallExpr &expr)
+{
+ // method receiver
+ tree self = CompileExpr::Compile (expr.get_receiver ().get (), ctx);
+
+ // lookup the resolved name
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_name (
+ expr.get_mappings ().get_nodeid (), &resolved_node_id))
+ {
+ rust_error_at (expr.get_locus (), "failed to lookup resolved MethodCall");
+ return;
+ }
+
+ // reverse lookup
+ HirId ref;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref))
+ {
+ rust_fatal_error (expr.get_locus (), "reverse lookup failure");
+ return;
+ }
+
+ // lookup the expected function type
+ TyTy::BaseType *lookup_fntype = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_method_name ().get_mappings ().get_hirid (), &lookup_fntype);
+ rust_assert (ok);
+ rust_assert (lookup_fntype->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (lookup_fntype);
+
+ TyTy::BaseType *receiver = nullptr;
+ ok = ctx->get_tyctx ()->lookup_receiver (expr.get_mappings ().get_hirid (),
+ &receiver);
+ rust_assert (ok);
+
+ bool is_dyn_dispatch
+ = receiver->get_root ()->get_kind () == TyTy::TypeKind::DYNAMIC;
+ bool is_generic_receiver = receiver->get_kind () == TyTy::TypeKind::PARAM;
+ if (is_generic_receiver)
+ {
+ TyTy::ParamType *p = static_cast<TyTy::ParamType *> (receiver);
+ receiver = p->resolve ();
+ }
+
+ tree fn_expr = error_mark_node;
+ if (is_dyn_dispatch)
+ {
+ const TyTy::DynamicObjectType *dyn
+ = static_cast<const TyTy::DynamicObjectType *> (receiver->get_root ());
+
+ std::vector<HIR::Expr *> arguments;
+ for (auto &arg : expr.get_arguments ())
+ arguments.push_back (arg.get ());
+
+ fn_expr
+ = get_fn_addr_from_dyn (dyn, receiver, fntype, self, expr.get_locus ());
+ self = get_receiver_from_dyn (dyn, receiver, fntype, self,
+ expr.get_locus ());
+ }
+ else
+ {
+ // lookup compiled functions since it may have already been compiled
+ HIR::PathExprSegment method_name = expr.get_method_name ();
+ HIR::PathIdentSegment segment_name = method_name.get_segment ();
+ fn_expr
+ = resolve_method_address (fntype, ref, receiver, segment_name,
+ expr.get_mappings (), expr.get_locus ());
+ }
+
+ // lookup the autoderef mappings
+ HirId autoderef_mappings_id
+ = expr.get_receiver ()->get_mappings ().get_hirid ();
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ ok = ctx->get_tyctx ()->lookup_autoderef_mappings (autoderef_mappings_id,
+ &adjustments);
+ rust_assert (ok);
+
+ // apply adjustments for the fn call
+ self = resolve_adjustements (*adjustments, self,
+ expr.get_receiver ()->get_locus ());
+
+ std::vector<tree> args;
+ args.push_back (self); // adjusted self
+
+ // normal args
+ for (size_t i = 0; i < expr.get_arguments ().size (); i++)
+ {
+ auto &argument = expr.get_arguments ().at (i);
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+
+ // assignments are coercion sites so lets convert the rvalue if
+ // necessary, offset from the already adjusted implicit self
+ bool ok;
+ TyTy::BaseType *expected = fntype->param_at (i + 1).second;
+
+ TyTy::BaseType *actual = nullptr;
+ ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ // coerce it if required
+ Location lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ Location rvalue_locus = argument->get_locus ();
+ rvalue = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+
+ // add it to the list
+ args.push_back (rvalue);
+ }
+
+ translated = ctx->get_backend ()->call_expression (fn_expr, args, nullptr,
+ expr.get_locus ());
+}
+
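+// Dynamic dispatch assumes the receiver is a fat pointer laid out as
+// { receiver_object, vtable_pointer }: field 1 holds the vtable, indexed by
+// the method's slot within the trait object items, while field 0 (see
+// get_receiver_from_dyn below) is the object passed as `self`.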
+tree
+CompileExpr::get_fn_addr_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver,
+ TyTy::FnType *fntype, tree receiver_ref,
+ Location expr_locus)
+{
+ size_t offs = 0;
+ const Resolver::TraitItemReference *ref = nullptr;
+ for (auto &bound : dyn->get_object_items ())
+ {
+ const Resolver::TraitItemReference *item = bound.first;
+ auto t = item->get_tyty ();
+ rust_assert (t->get_kind () == TyTy::TypeKind::FNDEF);
+ auto ft = static_cast<TyTy::FnType *> (t);
+
+ if (ft->get_id () == fntype->get_id ())
+ {
+ ref = item;
+ break;
+ }
+ offs++;
+ }
+
+ if (ref == nullptr)
+ return error_mark_node;
+
+ // get any indirection sorted out
+ if (receiver->get_kind () == TyTy::TypeKind::REF)
+ {
+ tree indirect = indirect_expression (receiver_ref, expr_locus);
+ receiver_ref = indirect;
+ }
+
+ // cast it to the correct fntype
+ tree expected_fntype = TyTyResolveCompile::compile (ctx, fntype, true);
+ tree idx = build_int_cst (size_type_node, offs);
+
+ tree vtable_ptr
+ = ctx->get_backend ()->struct_field_expression (receiver_ref, 1,
+ expr_locus);
+ tree vtable_array_access = build4_loc (expr_locus.gcc_location (), ARRAY_REF,
+ TREE_TYPE (TREE_TYPE (vtable_ptr)),
+ vtable_ptr, idx, NULL_TREE, NULL_TREE);
+
+ tree vcall
+ = build3_loc (expr_locus.gcc_location (), OBJ_TYPE_REF, expected_fntype,
+ vtable_array_access, receiver_ref, idx);
+
+ return vcall;
+}
+
+tree
+CompileExpr::get_receiver_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver,
+ TyTy::FnType *fntype, tree receiver_ref,
+ Location expr_locus)
+{
+ // get any indirection sorted out
+ if (receiver->get_kind () == TyTy::TypeKind::REF)
+ {
+ tree indirect = indirect_expression (receiver_ref, expr_locus);
+ receiver_ref = indirect;
+ }
+
+  // access the offs + 1 for the fnptr and offs=0 for the receiver obj
+ return ctx->get_backend ()->struct_field_expression (receiver_ref, 0,
+ expr_locus);
+}
+
+tree
+CompileExpr::resolve_method_address (TyTy::FnType *fntype, HirId ref,
+ TyTy::BaseType *receiver,
+ HIR::PathIdentSegment &segment,
+ Analysis::NodeMapping expr_mappings,
+ Location expr_locus)
+{
+ // lookup compiled functions since it may have already been compiled
+ tree fn = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &fn))
+ {
+ return address_expression (fn, expr_locus);
+ }
+
+  // Now we can try to resolve the address since this might be a forward
+  // declared function, a generic function which has not been compiled yet,
+  // or a trait item which is not yet bound
+ HIR::ImplItem *resolved_item
+ = ctx->get_mappings ()->lookup_hir_implitem (ref, nullptr);
+ if (resolved_item != nullptr)
+ {
+ if (!fntype->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (resolved_item, ctx);
+
+ return CompileInherentImplItem::Compile (resolved_item, ctx, fntype);
+ }
+
+ // it might be resolved to a trait item
+ HIR::TraitItem *trait_item
+ = ctx->get_mappings ()->lookup_hir_trait_item (ref);
+ HIR::Trait *trait = ctx->get_mappings ()->lookup_trait_item_mapping (
+ trait_item->get_mappings ().get_hirid ());
+
+ Resolver::TraitReference *trait_ref
+ = &Resolver::TraitReference::error_node ();
+ bool ok = ctx->get_tyctx ()->lookup_trait_reference (
+ trait->get_mappings ().get_defid (), &trait_ref);
+ rust_assert (ok);
+
+  // the type resolver can only resolve type bounds to their trait
+  // item so it's up to us to figure out if this path should resolve
+  // to a trait-impl-block-item or if it can be defaulted to the
+  // trait-impl-item's definition
+
+ auto root = receiver->get_root ();
+ std::vector<Resolver::PathProbeCandidate> candidates
+ = Resolver::PathProbeType::Probe (root, segment, true /* probe_impls */,
+ false /* probe_bounds */,
+ true /* ignore_mandatory_trait_items */);
+ if (candidates.size () == 0)
+ {
+ // this means we are defaulting back to the trait_item if
+ // possible
+ Resolver::TraitItemReference *trait_item_ref = nullptr;
+ bool ok = trait_ref->lookup_hir_trait_item (*trait_item, &trait_item_ref);
+ rust_assert (ok); // found
+ rust_assert (trait_item_ref->is_optional ()); // has definition
+
+      // FIXME Optional means it has a definition and an associated
+      // block which can be a default implementation; if it does not
+      // contain an implementation we should actually return
+      // error_mark_node
+
+ return CompileTraitItem::Compile (trait_item_ref->get_hir_trait_item (),
+ ctx, fntype, true, expr_locus);
+ }
+ else
+ {
+      // FIXME this will be a case to return error_mark_node: there is
+      // an error scenario where a Trait Foo has a method Bar, but this
+      // receiver does not implement this trait or has an incompatible
+      // implementation, and we should just return error_mark_node
+
+ rust_assert (candidates.size () == 1);
+ auto &candidate = candidates.at (0);
+ rust_assert (candidate.is_impl_candidate ());
+ rust_assert (candidate.ty->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *candidate_call = static_cast<TyTy::FnType *> (candidate.ty);
+
+ HIR::ImplItem *impl_item = candidate.item.impl.impl_item;
+ if (!candidate_call->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (impl_item, ctx);
+
+ TyTy::BaseType *monomorphized = candidate_call;
+ if (candidate_call->needs_generic_substitutions ())
+ {
+ TyTy::BaseType *infer_impl_call
+ = candidate_call->infer_substitions (expr_locus);
+ monomorphized = infer_impl_call->unify (fntype);
+ }
+
+ return CompileInherentImplItem::Compile (impl_item, ctx, monomorphized);
+ }
+}
+
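+// Lower an operator overload, e.g. `a + b` where the LHS type implements the
+// `Add` lang item: resolve the corresponding trait method, apply the autoderef
+// adjustments to the LHS to form the `self` argument, and emit a call such as
+// `Add::add (a, b)`; the RHS is omitted for unary operators like negation.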
+tree
+CompileExpr::resolve_operator_overload (
+ Analysis::RustLangItem::ItemType lang_item_type, HIR::OperatorExprMeta expr,
+ tree lhs, tree rhs, HIR::Expr *lhs_expr, HIR::Expr *rhs_expr)
+{
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ rust_assert (is_op_overload);
+
+ // lookup the resolved name
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ bool ok = ctx->get_resolver ()->lookup_resolved_name (
+ expr.get_mappings ().get_nodeid (), &resolved_node_id);
+ rust_assert (ok);
+
+ // reverse lookup
+ HirId ref;
+ ok = ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref);
+ rust_assert (ok);
+
+ TyTy::BaseType *receiver = nullptr;
+ ok = ctx->get_tyctx ()->lookup_receiver (expr.get_mappings ().get_hirid (),
+ &receiver);
+ rust_assert (ok);
+
+ bool is_generic_receiver = receiver->get_kind () == TyTy::TypeKind::PARAM;
+ if (is_generic_receiver)
+ {
+ TyTy::ParamType *p = static_cast<TyTy::ParamType *> (receiver);
+ receiver = p->resolve ();
+ }
+
+ // lookup compiled functions since it may have already been compiled
+ HIR::PathIdentSegment segment_name (
+ Analysis::RustLangItem::ToString (lang_item_type));
+ tree fn_expr
+ = resolve_method_address (fntype, ref, receiver, segment_name,
+ expr.get_mappings (), expr.get_locus ());
+
+ // lookup the autoderef mappings
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ ok = ctx->get_tyctx ()->lookup_autoderef_mappings (
+ expr.get_lvalue_mappings ().get_hirid (), &adjustments);
+ rust_assert (ok);
+
+ // apply adjustments for the fn call
+ tree self = resolve_adjustements (*adjustments, lhs, lhs_expr->get_locus ());
+
+ std::vector<tree> args;
+ args.push_back (self); // adjusted self
+ if (rhs != nullptr) // can be null for negation_expr (unary ones)
+ args.push_back (rhs);
+
+ return ctx->get_backend ()->call_expression (fn_expr, args, nullptr,
+ expr.get_locus ());
+}
+
+tree
+CompileExpr::compile_bool_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::BOOL);
+
+ const auto literal_value = expr.get_literal ();
+ bool bval = literal_value.as_string ().compare ("true") == 0;
+ return ctx->get_backend ()->boolean_constant_expression (bval);
+}
+
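+// Integer literals are range-checked against the static bounds of the target
+// type before the constant is built, so e.g. `let x: u8 = 300;` is rejected
+// with an overflow error rather than silently wrapping.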
+tree
+CompileExpr::compile_integer_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::INT);
+ const auto literal_value = expr.get_literal ();
+
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+
+ mpz_t ival;
+ if (mpz_init_set_str (ival, literal_value.as_string ().c_str (), 10) != 0)
+ {
+ rust_error_at (expr.get_locus (), "bad number in literal");
+ return error_mark_node;
+ }
+
+ mpz_t type_min;
+ mpz_t type_max;
+ mpz_init (type_min);
+ mpz_init (type_max);
+ get_type_static_bounds (type, type_min, type_max);
+
+ if (mpz_cmp (ival, type_min) < 0 || mpz_cmp (ival, type_max) > 0)
+ {
+ rust_error_at (expr.get_locus (),
+ "integer overflows the respective type %<%s%>",
+ tyty->get_name ().c_str ());
+ return error_mark_node;
+ }
+
+ tree result = wide_int_to_tree (type, wi::from_mpz (type, ival, true));
+
+ mpz_clear (type_min);
+ mpz_clear (type_max);
+ mpz_clear (ival);
+
+ return result;
+}
+
+tree
+CompileExpr::compile_float_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::FLOAT);
+ const auto literal_value = expr.get_literal ();
+
+ mpfr_t fval;
+ if (mpfr_init_set_str (fval, literal_value.as_string ().c_str (), 10,
+ MPFR_RNDN)
+ != 0)
+ {
+ rust_error_at (expr.get_locus (), "bad number in literal");
+ return error_mark_node;
+ }
+
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+
+  // taken from go/gofrontend/expressions.cc:check_float_type
+ mpfr_exp_t exp = mpfr_get_exp (fval);
+ bool real_value_overflow = exp > TYPE_PRECISION (type);
+
+ REAL_VALUE_TYPE r1;
+ real_from_mpfr (&r1, fval, type, GMP_RNDN);
+ REAL_VALUE_TYPE r2;
+ real_convert (&r2, TYPE_MODE (type), &r1);
+
+ tree real_value = build_real (type, r2);
+ if (TREE_OVERFLOW (real_value) || real_value_overflow)
+ {
+ rust_error_at (expr.get_locus (),
+ "decimal overflows the respective type %<%s%>",
+ tyty->get_name ().c_str ());
+ return error_mark_node;
+ }
+
+ return real_value;
+}
+
+tree
+CompileExpr::compile_char_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::CHAR);
+ const auto literal_value = expr.get_literal ();
+
+ // FIXME needs wchar_t
+ char c = literal_value.as_string ().c_str ()[0];
+ return ctx->get_backend ()->wchar_constant_expression (c);
+}
+
+tree
+CompileExpr::compile_byte_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::BYTE);
+ const auto literal_value = expr.get_literal ();
+
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+ char c = literal_value.as_string ().c_str ()[0];
+ return build_int_cst (type, c);
+}
+
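+// String literals compile to a fat-pointer constructor { data, len }: the
+// bytes become a string constant whose address is paired with a usize length,
+// e.g. `"hello"` yields a pointer to the constant data together with the
+// length 5.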
+tree
+CompileExpr::compile_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ tree fat_pointer = TyTyResolveCompile::compile (ctx, tyty);
+
+ rust_assert (expr.get_lit_type () == HIR::Literal::STRING);
+ const auto literal_value = expr.get_literal ();
+
+ auto base = ctx->get_backend ()->string_constant_expression (
+ literal_value.as_string ());
+ tree data = address_expression (base, expr.get_locus ());
+
+ TyTy::BaseType *usize = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize);
+ rust_assert (ok);
+ tree type = TyTyResolveCompile::compile (ctx, usize);
+
+ tree size = build_int_cstu (type, literal_value.as_string ().size ());
+
+ return ctx->get_backend ()->constructor_expression (fat_pointer, false,
+ {data, size}, -1,
+ expr.get_locus ());
+}
+
+tree
+CompileExpr::compile_byte_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::BYTE_STRING);
+
+ // the type here is &[ty; capacity]
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::REF);
+ const auto ref_tyty = static_cast<const TyTy::ReferenceType *> (tyty);
+ auto base_tyty = ref_tyty->get_base ();
+ rust_assert (base_tyty->get_kind () == TyTy::TypeKind::ARRAY);
+ auto array_tyty = static_cast<TyTy::ArrayType *> (base_tyty);
+
+ std::string value_str = expr.get_literal ().as_string ();
+ std::vector<tree> vals;
+ std::vector<unsigned long> indexes;
+ for (size_t i = 0; i < value_str.size (); i++)
+ {
+ char b = value_str.at (i);
+ tree bb = ctx->get_backend ()->char_constant_expression (b);
+ vals.push_back (bb);
+ indexes.push_back (i);
+ }
+
+ tree array_type = TyTyResolveCompile::compile (ctx, array_tyty);
+ tree constructed
+ = ctx->get_backend ()->array_constructor_expression (array_type, indexes,
+ vals,
+ expr.get_locus ());
+
+ return address_expression (constructed, expr.get_locus ());
+}
+
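+// Lower a Rust `as` cast by dispatching on the tree code of the destination
+// type, e.g. `x as f64` goes through convert_to_real, while a cast from a
+// slice to a raw pointer reads the data pointer out of the fat pointer rather
+// than emitting a plain NOP_EXPR style cast (see the ICE note below).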
+tree
+CompileExpr::type_cast_expression (tree type_to_cast_to, tree expr_tree,
+ Location location)
+{
+ if (type_to_cast_to == error_mark_node || expr_tree == error_mark_node
+ || TREE_TYPE (expr_tree) == error_mark_node)
+ return error_mark_node;
+
+ if (ctx->get_backend ()->type_size (type_to_cast_to) == 0
+ || TREE_TYPE (expr_tree) == void_type_node)
+ {
+ // Do not convert zero-sized types.
+ return expr_tree;
+ }
+ else if (TREE_CODE (type_to_cast_to) == INTEGER_TYPE)
+ {
+ tree cast = convert_to_integer (type_to_cast_to, expr_tree);
+ // FIXME check for TREE_OVERFLOW?
+ return cast;
+ }
+ else if (TREE_CODE (type_to_cast_to) == REAL_TYPE)
+ {
+ tree cast = convert_to_real (type_to_cast_to, expr_tree);
+ // FIXME
+ // We might need to check that the tree is MAX val and thusly saturate it
+ // to inf. we can get the bounds and check the value if its >= or <= to
+ // the min and max bounds
+ //
+ // https://github.com/Rust-GCC/gccrs/issues/635
+ return cast;
+ }
+ else if (TREE_CODE (type_to_cast_to) == COMPLEX_TYPE)
+ {
+ return convert_to_complex (type_to_cast_to, expr_tree);
+ }
+ else if (TREE_CODE (type_to_cast_to) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (expr_tree)) == INTEGER_TYPE)
+ {
+ return convert_to_pointer (type_to_cast_to, expr_tree);
+ }
+ else if (TREE_CODE (type_to_cast_to) == RECORD_TYPE
+ || TREE_CODE (type_to_cast_to) == ARRAY_TYPE)
+ {
+ return fold_build1_loc (location.gcc_location (), VIEW_CONVERT_EXPR,
+ type_to_cast_to, expr_tree);
+ }
+ else if (TREE_CODE (type_to_cast_to) == POINTER_TYPE
+ && SLICE_TYPE_P (TREE_TYPE (expr_tree)))
+ {
+      // returning a raw cast using NOP_EXPR seems to result in an ICE:
+ //
+ // Analyzing compilation unit
+ // Performing interprocedural optimizations
+ // <*free_lang_data> {heap 2644k} <visibility> {heap 2644k}
+ // <build_ssa_passes> {heap 2644k} <opt_local_passes> {heap 2644k}during
+ // GIMPLE pass: cddce
+ // In function ‘*T::as_ptr<i32>’:
+ // rust1: internal compiler error: in propagate_necessity, at
+ // tree-ssa-dce.cc:984 0x1d5b43e propagate_necessity
+ // ../../gccrs/gcc/tree-ssa-dce.cc:984
+ // 0x1d5e180 perform_tree_ssa_dce
+ // ../../gccrs/gcc/tree-ssa-dce.cc:1876
+ // 0x1d5e2c8 tree_ssa_cd_dce
+ // ../../gccrs/gcc/tree-ssa-dce.cc:1920
+ // 0x1d5e49a execute
+ // ../../gccrs/gcc/tree-ssa-dce.cc:1992
+
+      // this is returning the direct raw pointer of the slice and assumes a
+      // very specific layout
+ return ctx->get_backend ()->struct_field_expression (expr_tree, 0,
+ location);
+ }
+
+ return fold_convert_loc (location.gcc_location (), type_to_cast_to,
+ expr_tree);
+}
+
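+// Lower an array expression. An explicit element list like `[1, 2, 3]` becomes
+// a GENERIC array constructor, while a copied form like `[0; 32]` either
+// expands to a full constructor (in const contexts) or to a loop initializing
+// a temporary array (see array_copied_expr below).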
+void
+CompileExpr::visit (HIR::ArrayExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this array expr");
+ return;
+ }
+
+ tree array_type = TyTyResolveCompile::compile (ctx, tyty);
+ if (TREE_CODE (array_type) != ARRAY_TYPE)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::ARRAY);
+ const TyTy::ArrayType &array_tyty
+ = static_cast<const TyTy::ArrayType &> (*tyty);
+
+ HIR::ArrayElems &elements = *expr.get_internal_elements ();
+ switch (elements.get_array_expr_type ())
+ {
+ case HIR::ArrayElems::ArrayExprType::VALUES: {
+ HIR::ArrayElemsValues &elems
+ = static_cast<HIR::ArrayElemsValues &> (elements);
+ translated
+ = array_value_expr (expr.get_locus (), array_tyty, array_type, elems);
+ }
+ return;
+
+ case HIR::ArrayElems::ArrayExprType::COPIED:
+ HIR::ArrayElemsCopied &elems
+ = static_cast<HIR::ArrayElemsCopied &> (elements);
+ translated
+ = array_copied_expr (expr.get_locus (), array_tyty, array_type, elems);
+ }
+}
+
+tree
+CompileExpr::array_value_expr (Location expr_locus,
+ const TyTy::ArrayType &array_tyty,
+ tree array_type, HIR::ArrayElemsValues &elems)
+{
+ std::vector<unsigned long> indexes;
+ std::vector<tree> constructor;
+ size_t i = 0;
+ for (auto &elem : elems.get_values ())
+ {
+ tree translated_expr = CompileExpr::Compile (elem.get (), ctx);
+ constructor.push_back (translated_expr);
+ indexes.push_back (i++);
+ }
+
+ return ctx->get_backend ()->array_constructor_expression (array_type, indexes,
+ constructor,
+ expr_locus);
+}
+
+tree
+CompileExpr::array_copied_expr (Location expr_locus,
+ const TyTy::ArrayType &array_tyty,
+ tree array_type, HIR::ArrayElemsCopied &elems)
+{
+ // see gcc/cp/typeck2.cc:1369-1401
+ gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
+ tree domain = TYPE_DOMAIN (array_type);
+ if (!domain)
+ return error_mark_node;
+
+ if (!TREE_CONSTANT (TYPE_MAX_VALUE (domain)))
+ {
+ rust_error_at (expr_locus, "non const capacity domain %qT", array_type);
+ return error_mark_node;
+ }
+
+ tree capacity_expr = CompileExpr::Compile (elems.get_num_copies_expr (), ctx);
+ if (!TREE_CONSTANT (capacity_expr))
+ {
+ rust_error_at (expr_locus, "non const num copies %qT", array_type);
+ return error_mark_node;
+ }
+
+ // get the compiled value
+ tree translated_expr = CompileExpr::Compile (elems.get_elem_to_copy (), ctx);
+
+ tree max_domain = TYPE_MAX_VALUE (domain);
+ tree min_domain = TYPE_MIN_VALUE (domain);
+
+ auto max = wi::to_offset (max_domain);
+ auto min = wi::to_offset (min_domain);
+ auto precision = TYPE_PRECISION (TREE_TYPE (domain));
+ auto sign = TYPE_SIGN (TREE_TYPE (domain));
+ unsigned HOST_WIDE_INT len
+ = wi::ext (max - min + 1, precision, sign).to_uhwi ();
+
+ // In a const context we must initialize the entire array, which entails
+ // allocating for each element. If the user wants a huge array, we will OOM
+ // and die horribly.
+ if (ctx->const_context_p ())
+ {
+ size_t idx = 0;
+ std::vector<unsigned long> indexes;
+ std::vector<tree> constructor;
+ for (unsigned HOST_WIDE_INT i = 0; i < len; i++)
+ {
+ constructor.push_back (translated_expr);
+ indexes.push_back (idx++);
+ }
+
+ return ctx->get_backend ()->array_constructor_expression (array_type,
+ indexes,
+ constructor,
+ expr_locus);
+ }
+
+ else
+ {
+ // Create a new block scope in which to initialize the array
+ tree fndecl = NULL_TREE;
+ if (ctx->in_fn ())
+ fndecl = ctx->peek_fn ().fndecl;
+
+ std::vector<Bvariable *> locals;
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree init_block
+ = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ expr_locus, expr_locus);
+ ctx->push_block (init_block);
+
+ tree tmp;
+ tree stmts
+ = ctx->get_backend ()->array_initializer (fndecl, init_block,
+ array_type, capacity_expr,
+ translated_expr, &tmp,
+ expr_locus);
+ ctx->add_statement (stmts);
+
+ tree block = ctx->pop_block ();
+
+      // The result is a compound expression which creates a temporary array,
+      // initializes all the elements in a loop, and then yields the array.
+ return ctx->get_backend ()->compound_expression (block, tmp, expr_locus);
+ }
+}
+
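+// Apply the adjustment chain recorded during type checking to an expression,
+// in order: autoref takes the address (unless the value is already a slice
+// fat pointer), deref calls the user's Deref/DerefMut impl, indirection loads
+// through a pointer, and unsize coerces e.g. a `[T; N]` into a `&[T]` fat
+// pointer or a concrete type into a `dyn Trait` object.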
+tree
+HIRCompileBase::resolve_adjustements (
+ std::vector<Resolver::Adjustment> &adjustments, tree expression,
+ Location locus)
+{
+ tree e = expression;
+ for (auto &adjustment : adjustments)
+ {
+ switch (adjustment.get_type ())
+ {
+ case Resolver::Adjustment::AdjustmentType::ERROR:
+ return error_mark_node;
+
+ case Resolver::Adjustment::AdjustmentType::IMM_REF:
+ case Resolver::Adjustment::AdjustmentType::MUT_REF: {
+ if (!SLICE_TYPE_P (TREE_TYPE (e)))
+ {
+ e = address_expression (e, locus);
+ }
+ }
+ break;
+
+ case Resolver::Adjustment::AdjustmentType::DEREF:
+ case Resolver::Adjustment::AdjustmentType::DEREF_MUT:
+ e = resolve_deref_adjustment (adjustment, e, locus);
+ break;
+
+ case Resolver::Adjustment::AdjustmentType::INDIRECTION:
+ e = resolve_indirection_adjustment (adjustment, e, locus);
+ break;
+
+ case Resolver::Adjustment::AdjustmentType::UNSIZE:
+ e = resolve_unsized_adjustment (adjustment, e, locus);
+ break;
+ }
+ }
+
+ return e;
+}
+
+tree
+HIRCompileBase::resolve_deref_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus)
+{
+ rust_assert (adjustment.is_deref_adjustment ()
+ || adjustment.is_deref_mut_adjustment ());
+ rust_assert (adjustment.has_operator_overload ());
+
+ TyTy::FnType *lookup = adjustment.get_deref_operator_fn ();
+ HIR::ImplItem *resolved_item = adjustment.get_deref_hir_item ();
+
+ tree fn_address = error_mark_node;
+ if (!lookup->has_subsititions_defined ())
+ fn_address = CompileInherentImplItem::Compile (resolved_item, ctx, nullptr,
+ true, locus);
+ else
+ fn_address = CompileInherentImplItem::Compile (resolved_item, ctx, lookup,
+ true, locus);
+
+ // does it need a reference to call
+ tree adjusted_argument = expression;
+ bool needs_borrow = adjustment.get_deref_adjustment_type ()
+ != Resolver::Adjustment::AdjustmentType::ERROR;
+ if (needs_borrow)
+ {
+ adjusted_argument = address_expression (expression, locus);
+ }
+
+ // make the call
+ return ctx->get_backend ()->call_expression (fn_address, {adjusted_argument},
+ nullptr, locus);
+}
+
+tree
+HIRCompileBase::resolve_indirection_adjustment (
+ Resolver::Adjustment &adjustment, tree expression, Location locus)
+{
+ return indirect_expression (expression, locus);
+}
+
+tree
+HIRCompileBase::resolve_unsized_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus)
+{
+ bool expect_slice
+ = adjustment.get_expected ()->get_kind () == TyTy::TypeKind::SLICE;
+ bool expect_dyn
+ = adjustment.get_expected ()->get_kind () == TyTy::TypeKind::DYNAMIC;
+
+ // assumes this is an array
+ tree expr_type = TREE_TYPE (expression);
+ if (expect_slice)
+ {
+ rust_assert (TREE_CODE (expr_type) == ARRAY_TYPE);
+ return resolve_unsized_slice_adjustment (adjustment, expression, locus);
+ }
+
+ rust_assert (expect_dyn);
+ return resolve_unsized_dyn_adjustment (adjustment, expression, locus);
+}
+
+tree
+HIRCompileBase::resolve_unsized_slice_adjustment (
+ Resolver::Adjustment &adjustment, tree expression, Location locus)
+{
+ // assumes this is an array
+ tree expr_type = TREE_TYPE (expression);
+ rust_assert (TREE_CODE (expr_type) == ARRAY_TYPE);
+
+ // takes an array and returns a fat-pointer so this becomes a constructor
+ // expression
+ rust_assert (adjustment.get_expected ()->get_kind ()
+ == TyTy::TypeKind::SLICE);
+ tree fat_pointer
+ = TyTyResolveCompile::compile (ctx, adjustment.get_expected ());
+
+ // make a constructor for this
+ tree data = address_expression (expression, locus);
+
+ // fetch the size from the domain
+ tree domain = TYPE_DOMAIN (expr_type);
+ unsigned HOST_WIDE_INT array_size
+ = wi::ext (wi::to_offset (TYPE_MAX_VALUE (domain))
+ - wi::to_offset (TYPE_MIN_VALUE (domain)) + 1,
+ TYPE_PRECISION (TREE_TYPE (domain)),
+ TYPE_SIGN (TREE_TYPE (domain)))
+ .to_uhwi ();
+ tree size = build_int_cstu (size_type_node, array_size);
+
+ return ctx->get_backend ()->constructor_expression (fat_pointer, false,
+ {data, size}, -1, locus);
+}
+
+tree
+HIRCompileBase::resolve_unsized_dyn_adjustment (
+ Resolver::Adjustment &adjustment, tree expression, Location locus)
+{
+ tree rvalue = expression;
+ Location rvalue_locus = locus;
+
+ const TyTy::BaseType *actual = adjustment.get_actual ();
+ const TyTy::BaseType *expected = adjustment.get_expected ();
+
+ const TyTy::DynamicObjectType *dyn
+ = static_cast<const TyTy::DynamicObjectType *> (expected);
+
+ rust_debug ("resolve_unsized_dyn_adjustment actual={%s} dyn={%s}",
+ actual->debug_str ().c_str (), dyn->debug_str ().c_str ());
+
+ return coerce_to_dyn_object (rvalue, actual, dyn, rvalue_locus);
+}
+
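+// Range expressions lower to plain constructors for the corresponding
+// core::ops range ADTs, e.g. `a..b` roughly builds Range { start: a, end: b }
+// and `..` builds the empty RangeFull.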
+void
+CompileExpr::visit (HIR::RangeFromToExpr &expr)
+{
+ tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx);
+ tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx);
+ if (from == error_mark_node || to == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {from, to}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeFromExpr &expr)
+{
+ tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx);
+ if (from == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {from}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeToExpr &expr)
+{
+ tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx);
+ if (to == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {to}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeFullExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+ translated = ctx->get_backend ()->constructor_expression (adt, false, {}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeFromToInclExpr &expr)
+{
+ tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx);
+ tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx);
+ if (from == error_mark_node || to == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {from, to}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::ArrayIndexExpr &expr)
+{
+ tree array_reference = CompileExpr::Compile (expr.get_array_expr (), ctx);
+ tree index = CompileExpr::Compile (expr.get_index_expr (), ctx);
+
+  // this might be a core::ops::index lang item situation
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type = Analysis::RustLangItem::ItemType::INDEX;
+ tree operator_overload_call
+ = resolve_operator_overload (lang_item_type, expr, array_reference,
+ index, expr.get_array_expr (),
+ expr.get_index_expr ());
+
+ tree actual_type = TREE_TYPE (operator_overload_call);
+ bool can_indirect = TYPE_PTR_P (actual_type) || TYPE_REF_P (actual_type);
+ if (!can_indirect)
+ {
+ // nothing to do
+ translated = operator_overload_call;
+ return;
+ }
+
+      // rust deref always returns a reference from this overload, so then we
+      // can actually do the indirection
+ translated
+ = indirect_expression (operator_overload_call, expr.get_locus ());
+ return;
+ }
+
+  // let's check if the array is a reference type; then we can add an
+  // indirection if required
+ TyTy::BaseType *array_expr_ty = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_array_expr ()->get_mappings ().get_hirid (), &array_expr_ty);
+ rust_assert (ok);
+
+ // do we need to add an indirect reference
+ if (array_expr_ty->get_kind () == TyTy::TypeKind::REF)
+ {
+ array_reference
+ = indirect_expression (array_reference, expr.get_locus ());
+ }
+
+ translated
+ = ctx->get_backend ()->array_index_expression (array_reference, index,
+ expr.get_locus ());
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-expr.h b/gcc/rust/backend/rust-compile-expr.h
new file mode 100644
index 00000000000..4c1f95ade29
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-expr.h
@@ -0,0 +1,148 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_EXPR
+#define RUST_COMPILE_EXPR
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileExpr : private HIRCompileBase, protected HIR::HIRExpressionVisitor
+{
+public:
+ static tree Compile (HIR::Expr *expr, Context *ctx);
+
+ void visit (HIR::TupleIndexExpr &expr) override;
+ void visit (HIR::TupleExpr &expr) override;
+ void visit (HIR::ReturnExpr &expr) override;
+ void visit (HIR::CallExpr &expr) override;
+ void visit (HIR::MethodCallExpr &expr) override;
+ void visit (HIR::LiteralExpr &expr) override;
+ void visit (HIR::AssignmentExpr &expr) override;
+ void visit (HIR::CompoundAssignmentExpr &expr) override;
+ void visit (HIR::ArrayIndexExpr &expr) override;
+ void visit (HIR::ArrayExpr &expr) override;
+ void visit (HIR::ArithmeticOrLogicalExpr &expr) override;
+ void visit (HIR::ComparisonExpr &expr) override;
+ void visit (HIR::LazyBooleanExpr &expr) override;
+ void visit (HIR::NegationExpr &expr) override;
+ void visit (HIR::TypeCastExpr &expr) override;
+ void visit (HIR::IfExpr &expr) override;
+ void visit (HIR::IfExprConseqIf &expr) override;
+ void visit (HIR::IfExprConseqElse &expr) override;
+ void visit (HIR::BlockExpr &expr) override;
+ void visit (HIR::UnsafeBlockExpr &expr) override;
+ void visit (HIR::StructExprStruct &struct_expr) override;
+ void visit (HIR::StructExprStructFields &struct_expr) override;
+ void visit (HIR::GroupedExpr &expr) override;
+ void visit (HIR::FieldAccessExpr &expr) override;
+ void visit (HIR::QualifiedPathInExpression &expr) override;
+ void visit (HIR::PathInExpression &expr) override;
+ void visit (HIR::LoopExpr &expr) override;
+ void visit (HIR::WhileLoopExpr &expr) override;
+ void visit (HIR::BreakExpr &expr) override;
+ void visit (HIR::ContinueExpr &expr) override;
+ void visit (HIR::BorrowExpr &expr) override;
+ void visit (HIR::DereferenceExpr &expr) override;
+ void visit (HIR::MatchExpr &expr) override;
+ void visit (HIR::RangeFromToExpr &expr) override;
+ void visit (HIR::RangeFromExpr &expr) override;
+ void visit (HIR::RangeToExpr &expr) override;
+ void visit (HIR::RangeFullExpr &expr) override;
+ void visit (HIR::RangeFromToInclExpr &expr) override;
+
+ // Empty visit for unused Expression HIR nodes.
+ void visit (HIR::ClosureExprInner &) override {}
+ void visit (HIR::ClosureExprInnerTyped &) override {}
+ void visit (HIR::StructExprFieldIdentifier &) override {}
+ void visit (HIR::StructExprFieldIdentifierValue &) override {}
+ void visit (HIR::StructExprFieldIndexValue &) override {}
+ void visit (HIR::ErrorPropagationExpr &) override {}
+ void visit (HIR::RangeToInclExpr &) override {}
+ void visit (HIR::WhileLetLoopExpr &) override {}
+ void visit (HIR::ForLoopExpr &) override {}
+ void visit (HIR::IfExprConseqIfLet &) override {}
+ void visit (HIR::IfLetExpr &) override {}
+ void visit (HIR::IfLetExprConseqElse &) override {}
+ void visit (HIR::IfLetExprConseqIf &) override {}
+ void visit (HIR::IfLetExprConseqIfLet &) override {}
+ void visit (HIR::AwaitExpr &) override {}
+ void visit (HIR::AsyncBlockExpr &) override {}
+
+protected:
+ tree get_fn_addr_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver, TyTy::FnType *fntype,
+ tree receiver_ref, Location expr_locus);
+
+ tree get_receiver_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver, TyTy::FnType *fntype,
+ tree receiver_ref, Location expr_locus);
+
+ tree resolve_method_address (TyTy::FnType *fntype, HirId ref,
+ TyTy::BaseType *receiver,
+ HIR::PathIdentSegment &segment,
+ Analysis::NodeMapping expr_mappings,
+ Location expr_locus);
+
+ tree
+ resolve_operator_overload (Analysis::RustLangItem::ItemType lang_item_type,
+ HIR::OperatorExprMeta expr, tree lhs, tree rhs,
+ HIR::Expr *lhs_expr, HIR::Expr *rhs_expr);
+
+ tree compile_bool_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_integer_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_float_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_char_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_byte_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_byte_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree type_cast_expression (tree type_to_cast_to, tree expr, Location locus);
+
+ tree array_value_expr (Location expr_locus, const TyTy::ArrayType &array_tyty,
+ tree array_type, HIR::ArrayElemsValues &elems);
+
+ tree array_copied_expr (Location expr_locus,
+ const TyTy::ArrayType &array_tyty, tree array_type,
+ HIR::ArrayElemsCopied &elems);
+
+private:
+ CompileExpr (Context *ctx);
+
+ tree translated;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_EXPR
diff --git a/gcc/rust/backend/rust-compile-extern.h b/gcc/rust/backend/rust-compile-extern.h
new file mode 100644
index 00000000000..45a507e03be
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-extern.h
@@ -0,0 +1,172 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_EXTERN_ITEM
+#define RUST_COMPILE_EXTERN_ITEM
+
+#include "rust-compile-base.h"
+#include "rust-compile-intrinsic.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileExternItem : public HIRCompileBase,
+ public HIR::HIRExternalItemVisitor
+{
+public:
+ static tree compile (HIR::ExternalItem *item, Context *ctx,
+ TyTy::BaseType *concrete = nullptr,
+ bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileExternItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile extern item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+ void visit (HIR::ExternalStaticItem &item) override
+ {
+    // check if it's already been compiled
+ Bvariable *lookup = ctx->get_backend ()->error_variable ();
+ if (ctx->lookup_var_decl (item.get_mappings ().get_hirid (), &lookup))
+ {
+ reference = ctx->get_backend ()->var_expression (lookup, ref_locus);
+ return;
+ }
+
+ TyTy::BaseType *resolved_type = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (item.get_mappings ().get_hirid (),
+ &resolved_type);
+ rust_assert (ok);
+
+ std::string name = item.get_item_name ();
+ // FIXME this is assuming C ABI
+ std::string asm_name = name;
+
+ tree type = TyTyResolveCompile::compile (ctx, resolved_type);
+ bool is_external = true;
+ bool is_hidden = false;
+ bool in_unique_section = false;
+
+ Bvariable *static_global
+ = ctx->get_backend ()->global_variable (name, asm_name, type, is_external,
+ is_hidden, in_unique_section,
+ item.get_locus ());
+ ctx->insert_var_decl (item.get_mappings ().get_hirid (), static_global);
+ ctx->push_var (static_global);
+
+ reference = ctx->get_backend ()->var_expression (static_global, ref_locus);
+ }
+
+ void visit (HIR::ExternalFunctionItem &function) override
+ {
+ TyTy::BaseType *fntype_tyty;
+ if (!ctx->get_tyctx ()->lookup_type (function.get_mappings ().get_hirid (),
+ &fntype_tyty))
+ {
+ rust_fatal_error (function.get_locus (),
+ "failed to lookup function type");
+ return;
+ }
+
+ rust_assert (fntype_tyty->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (fntype_tyty);
+ if (fntype->has_subsititions_defined ())
+ {
+	// we can't do anything for this until it is used and a concrete type
+	// is given
+ if (concrete == nullptr)
+ return;
+ else
+ {
+ rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF);
+ fntype = static_cast<TyTy::FnType *> (concrete);
+ }
+ }
+
+    // items can be forward compiled which means we may not need to invoke
+    // this code. We might also have already compiled this generic function.
+ tree lookup = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup,
+ fntype->get_id (), fntype))
+ {
+ reference = address_expression (lookup, ref_locus);
+ return;
+ }
+
+ if (fntype->has_subsititions_defined ())
+ {
+	// override the Hir Lookups for the substitutions in this context
+ fntype->override_context ();
+ }
+
+ if (fntype->get_abi () == ABI::INTRINSIC)
+ {
+ Intrinsics compile (ctx);
+ tree fndecl = compile.compile (fntype);
+ ctx->insert_function_decl (fntype, fndecl);
+ return;
+ }
+
+ tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype);
+ std::string ir_symbol_name = function.get_item_name ();
+ std::string asm_name = function.get_item_name ();
+ if (fntype->get_abi () == ABI::RUST)
+ {
+ // then we need to get the canonical path of it and mangle it
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ function.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ ir_symbol_name = canonical_path->get () + fntype->subst_as_string ();
+ asm_name = ctx->mangle_item (fntype, *canonical_path);
+ }
+
+ const unsigned int flags = Backend::function_is_declaration;
+ tree fndecl
+ = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name,
+ asm_name, flags, function.get_locus ());
+ TREE_PUBLIC (fndecl) = 1;
+ setup_abi_options (fndecl, fntype->get_abi ());
+
+ ctx->insert_function_decl (fntype, fndecl);
+
+ reference = address_expression (fndecl, ref_locus);
+ }
+
+private:
+ CompileExternItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
+ : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
+ ref_locus (ref_locus)
+ {}
+
+ TyTy::BaseType *concrete;
+ tree reference;
+ Location ref_locus;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_EXTERN_ITEM
diff --git a/gcc/rust/backend/rust-compile-fnparam.cc b/gcc/rust/backend/rust-compile-fnparam.cc
new file mode 100644
index 00000000000..3f0ec82b625
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-fnparam.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-fnparam.h"
+#include "rust-compile-pattern.h"
+
+#include "gimple-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileFnParam::CompileFnParam (Context *ctx, tree fndecl, tree decl_type,
+ Location locus)
+ : HIRCompileBase (ctx), fndecl (fndecl), decl_type (decl_type), locus (locus),
+ compiled_param (ctx->get_backend ()->error_variable ())
+{}
+
+Bvariable *
+CompileFnParam::compile (Context *ctx, tree fndecl, HIR::FunctionParam *param,
+ tree decl_type, Location locus)
+{
+ CompileFnParam compiler (ctx, fndecl, decl_type, locus);
+ param->get_param_name ()->accept_vis (compiler);
+ return compiler.compiled_param;
+}
+
+Bvariable *
+CompileFnParam::compile (Context *ctx, tree fndecl, HIR::Pattern *param,
+ tree decl_type, Location locus)
+{
+ CompileFnParam compiler (ctx, fndecl, decl_type, locus);
+ param->accept_vis (compiler);
+ return compiler.compiled_param;
+}
+
+void
+CompileFnParam::visit (HIR::IdentifierPattern &pattern)
+{
+ if (!pattern.is_mut ())
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl,
+ pattern.get_identifier (),
+ decl_type, locus);
+}
+
+void
+CompileFnParam::visit (HIR::WildcardPattern &pattern)
+{
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl, "_", decl_type, locus);
+}
+
+void
+CompileFnParam::visit (HIR::StructPattern &pattern)
+{
+ // generate the anon param
+ tree tmp_ident = create_tmp_var_name ("RSTPRM");
+ std::string cpp_str_identifier = std::string (IDENTIFIER_POINTER (tmp_ident));
+
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl, cpp_str_identifier,
+ decl_type, locus);
+
+ // setup the pattern bindings
+ tree anon_param = ctx->get_backend ()->var_expression (compiled_param, locus);
+ CompilePatternBindings::Compile (&pattern, anon_param, ctx);
+}
+
+void
+CompileFnParam::visit (HIR::TupleStructPattern &pattern)
+{
+ // generate the anon param
+ tree tmp_ident = create_tmp_var_name ("RSTPRM");
+ std::string cpp_str_identifier = std::string (IDENTIFIER_POINTER (tmp_ident));
+
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl, cpp_str_identifier,
+ decl_type, locus);
+
+ // setup the pattern bindings
+ tree anon_param = ctx->get_backend ()->var_expression (compiled_param, locus);
+ CompilePatternBindings::Compile (&pattern, anon_param, ctx);
+}
+
+Bvariable *
+CompileSelfParam::compile (Context *ctx, tree fndecl, HIR::SelfParam &self,
+ tree decl_type, Location locus)
+{
+ bool is_immutable
+ = self.get_self_kind () == HIR::SelfParam::ImplicitSelfKind::IMM
+ || self.get_self_kind () == HIR::SelfParam::ImplicitSelfKind::IMM_REF;
+ if (is_immutable)
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+
+ return ctx->get_backend ()->parameter_variable (fndecl, "self", decl_type,
+ locus);
+}
+
+} // namespace Compile
+} // namespace Rust
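
For orientation, a minimal Rust sketch (editorial, not part of this patch; toy names) of the parameter patterns these visitors lower: a plain identifier becomes an immutable parameter variable, `mut` keeps it mutable, `_` gets an anonymous immutable slot, and struct/tuple-struct patterns receive a temporary "RSTPRM" parameter whose fields are then bound through CompilePatternBindings.

    fn by_ident(x: i32) -> i32 { x }                 // immutable parameter variable
    fn by_mut(mut x: i32) -> i32 { x += 1; x }       // mutable parameter variable
    fn by_wildcard(_: i32) {}                        // anonymous "_" parameter
    struct Point { x: i32, y: i32 }
    fn by_struct(Point { x, y }: Point) -> i32 { x + y } // temp param + field bindings

    fn main() {
        by_wildcard(0);
        println!("{} {} {}", by_ident(1), by_mut(1), by_struct(Point { x: 2, y: 3 }));
    }
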
diff --git a/gcc/rust/backend/rust-compile-fnparam.h b/gcc/rust/backend/rust-compile-fnparam.h
new file mode 100644
index 00000000000..0dbbd99ef08
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-fnparam.h
@@ -0,0 +1,70 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_FNPARAM
+#define RUST_COMPILE_FNPARAM
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileFnParam : private HIRCompileBase, protected HIR::HIRPatternVisitor
+{
+public:
+ static Bvariable *compile (Context *ctx, tree fndecl,
+ HIR::FunctionParam *param, tree decl_type,
+ Location locus);
+ static Bvariable *compile (Context *ctx, tree fndecl, HIR::Pattern *param,
+ tree decl_type, Location locus);
+
+ void visit (HIR::IdentifierPattern &pattern) override;
+ void visit (HIR::WildcardPattern &pattern) override;
+ void visit (HIR::StructPattern &) override;
+ void visit (HIR::TupleStructPattern &) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+
+private:
+ CompileFnParam (Context *ctx, tree fndecl, tree decl_type, Location locus);
+
+ tree fndecl;
+ tree decl_type;
+ Location locus;
+ Bvariable *compiled_param;
+};
+
+class CompileSelfParam : private HIRCompileBase
+{
+public:
+ static Bvariable *compile (Context *ctx, tree fndecl, HIR::SelfParam &self,
+ tree decl_type, Location locus);
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_FNPARAM
diff --git a/gcc/rust/backend/rust-compile-implitem.cc b/gcc/rust/backend/rust-compile-implitem.cc
new file mode 100644
index 00000000000..d0f70a70228
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-implitem.cc
@@ -0,0 +1,101 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-fnparam.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+CompileTraitItem::visit (HIR::TraitItemConst &constant)
+{
+ rust_assert (concrete != nullptr);
+ TyTy::BaseType *resolved_type = concrete;
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ constant.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ HIR::Expr *const_value_expr = constant.get_expr ().get ();
+ tree const_expr
+ = compile_constant_item (ctx, resolved_type, canonical_path,
+ const_value_expr, constant.get_locus ());
+ ctx->push_const (const_expr);
+ ctx->insert_const_decl (constant.get_mappings ().get_hirid (), const_expr);
+
+ reference = const_expr;
+}
+
+void
+CompileTraitItem::visit (HIR::TraitItemFunc &func)
+{
+ rust_assert (func.has_block_defined ());
+
+ rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (concrete);
+ fntype->monomorphize ();
+
+ // items can be forward compiled which means we may not need to invoke this
+ // code. We might also have already compiled this generic function as well.
+ tree lookup = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup,
+ fntype->get_id (), fntype))
+ {
+ // Has this been added to the list? Then it must be finished
+ if (ctx->function_completed (lookup))
+ {
+ tree dummy = NULL_TREE;
+ if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy))
+ {
+ ctx->insert_function_decl (fntype, lookup);
+ }
+
+ reference = address_expression (lookup, ref_locus);
+ return;
+ }
+ }
+
+ if (fntype->has_subsititions_defined ())
+ {
+ // override the HIR lookups for the substitutions in this context
+ fntype->override_context ();
+ }
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ func.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ // FIXME: How do we get the proper visibility here?
+ auto vis = HIR::Visibility (HIR::Visibility::VisType::PUBLIC);
+ HIR::TraitFunctionDecl &function = func.get_decl ();
+ tree fndecl
+ = compile_function (ctx, function.get_function_name (),
+ function.get_self (), function.get_function_params (),
+ function.get_qualifiers (), vis,
+ func.get_outer_attrs (), func.get_locus (),
+ func.get_block_expr ().get (), canonical_path, fntype,
+ function.has_return_type ());
+ reference = address_expression (fndecl, ref_locus);
+}
+
+} // namespace Compile
+} // namespace Rust
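
As a reference point, a small Rust sketch (editorial, not part of this patch) of the two trait item kinds CompileTraitItem handles: an associated const that carries an expression, and a trait function that carries a default block; declarations without a body have nothing to compile here.

    trait Shape {
        const SIDES: u32 = 4;                     // TraitItemConst with an expression
        fn area(&self) -> u32;                    // no block, nothing to compile here
        fn describe(&self) -> u32 { Self::SIDES } // TraitItemFunc with a default block
    }

    struct Square;
    impl Shape for Square {
        fn area(&self) -> u32 { 9 }
    }

    fn main() {
        let s = Square;
        println!("{} {}", s.area(), s.describe());
    }
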
diff --git a/gcc/rust/backend/rust-compile-implitem.h b/gcc/rust/backend/rust-compile-implitem.h
new file mode 100644
index 00000000000..ac9478af150
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-implitem.h
@@ -0,0 +1,91 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_IMPLITEM_H
+#define RUST_COMPILE_IMPLITEM_H
+
+#include "rust-compile-item.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-fnparam.h"
+
+namespace Rust {
+namespace Compile {
+
+// this is a proxy for HIR::ImplItems so they can reuse the normal HIR::Item path
+class CompileInherentImplItem : public CompileItem
+{
+public:
+ static tree Compile (HIR::ImplItem *item, Context *ctx,
+ TyTy::BaseType *concrete = nullptr,
+ bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileInherentImplItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile impl item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+private:
+ CompileInherentImplItem (Context *ctx, TyTy::BaseType *concrete,
+ Location ref_locus)
+ : CompileItem (ctx, concrete, ref_locus)
+ {}
+};
+
+class CompileTraitItem : public HIRCompileBase, public HIR::HIRTraitItemVisitor
+{
+public:
+ static tree Compile (HIR::TraitItem *item, Context *ctx,
+ TyTy::BaseType *concrete, bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileTraitItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile trait item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+ void visit (HIR::TraitItemConst &constant) override;
+ void visit (HIR::TraitItemFunc &func) override;
+
+ void visit (HIR::TraitItemType &typ) override {}
+
+private:
+ CompileTraitItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
+ : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
+ ref_locus (ref_locus)
+ {}
+
+ TyTy::BaseType *concrete;
+ tree reference;
+ Location ref_locus;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_IMPLITEM_H
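
A companion sketch (editorial, not part of this patch) for CompileInherentImplItem: items inside an inherent impl block are compiled through the same path as free items, which is what the proxy class above forwards to.

    struct Counter { n: u32 }

    impl Counter {                    // HIR::ImplBlock with inherent impl items
        const START: u32 = 0;         // compiled like a constant item
        fn new() -> Counter { Counter { n: Counter::START } }
        fn bump(&mut self) { self.n += 1; }
    }

    fn main() {
        let mut c = Counter::new();
        c.bump();
        println!("{}", c.n);
    }
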
diff --git a/gcc/rust/backend/rust-compile-intrinsic.cc b/gcc/rust/backend/rust-compile-intrinsic.cc
new file mode 100644
index 00000000000..61084b90f33
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-intrinsic.cc
@@ -0,0 +1,515 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-intrinsic.h"
+#include "fold-const.h"
+#include "langhooks.h"
+#include "rust-compile-context.h"
+#include "rust-compile-type.h"
+#include "rust-compile-fnparam.h"
+#include "rust-builtins.h"
+#include "rust-diagnostics.h"
+#include "rust-location.h"
+#include "rust-tree.h"
+#include "tree-core.h"
+#include "print-tree.h"
+
+namespace Rust {
+namespace Compile {
+
+static tree
+offset_handler (Context *ctx, TyTy::FnType *fntype);
+static tree
+sizeof_handler (Context *ctx, TyTy::FnType *fntype);
+static tree
+transmute_handler (Context *ctx, TyTy::FnType *fntype);
+static tree
+rotate_handler (Context *ctx, TyTy::FnType *fntype, tree_code op);
+static tree
+wrapping_op_handler (Context *ctx, TyTy::FnType *fntype, tree_code op);
+static tree
+copy_nonoverlapping_handler (Context *ctx, TyTy::FnType *fntype);
+
+static inline tree
+rotate_left_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return rotate_handler (ctx, fntype, LROTATE_EXPR);
+}
+static inline tree
+rotate_right_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return rotate_handler (ctx, fntype, RROTATE_EXPR);
+}
+
+static inline tree
+wrapping_add_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return wrapping_op_handler (ctx, fntype, PLUS_EXPR);
+}
+static inline tree
+wrapping_sub_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return wrapping_op_handler (ctx, fntype, MINUS_EXPR);
+}
+static inline tree
+wrapping_mul_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return wrapping_op_handler (ctx, fntype, MULT_EXPR);
+}
+
+static const std::map<std::string,
+ std::function<tree (Context *, TyTy::FnType *)>>
+ generic_intrinsics = {{"offset", &offset_handler},
+ {"size_of", &sizeof_handler},
+ {"transmute", &transmute_handler},
+ {"rotate_left", &rotate_left_handler},
+ {"rotate_right", &rotate_right_handler},
+ {"wrapping_add", &wrapping_add_handler},
+ {"wrapping_sub", &wrapping_sub_handler},
+ {"wrapping_mul", &wrapping_mul_handler},
+ {"copy_nonoverlapping", &copy_nonoverlapping_handler}};
+
+Intrinsics::Intrinsics (Context *ctx) : ctx (ctx) {}
+
+tree
+Intrinsics::compile (TyTy::FnType *fntype)
+{
+ rust_assert (fntype->get_abi () == ABI::INTRINSIC);
+
+ tree builtin = error_mark_node;
+ BuiltinsContext &builtin_ctx = BuiltinsContext::get ();
+ if (builtin_ctx.lookup_simple_builtin (fntype->get_identifier (), &builtin))
+ return builtin;
+
+ // is it a generic builtin?
+ auto it = generic_intrinsics.find (fntype->get_identifier ());
+ if (it != generic_intrinsics.end ())
+ return it->second (ctx, fntype);
+
+ Location locus = ctx->get_mappings ()->lookup_location (fntype->get_ref ());
+ rust_error_at (locus, "unknown builtin intrinsic: %s",
+ fntype->get_identifier ().c_str ());
+
+ return error_mark_node;
+}
+
+/**
+ * Items can be forward compiled which means we may not need to invoke this
+ * code. We might also have already compiled this generic function as well.
+ */
+static bool
+check_for_cached_intrinsic (Context *ctx, TyTy::FnType *fntype, tree *lookup)
+{
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), lookup,
+ fntype->get_id (), fntype))
+ {
+ // Has this been added to the list? Then it must be finished
+ if (ctx->function_completed (*lookup))
+ {
+ tree dummy = NULL_TREE;
+ if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy))
+ ctx->insert_function_decl (fntype, *lookup);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Maybe override the HIR lookups for the substitutions in this context
+ */
+static void
+maybe_override_ctx (TyTy::FnType *fntype)
+{
+ if (fntype->has_subsititions_defined ())
+ fntype->override_context ();
+}
+
+/**
+ * Compile and setup a function's parameters
+ */
+static void
+compile_fn_params (Context *ctx, TyTy::FnType *fntype, tree fndecl,
+ std::vector<Bvariable *> *compiled_param_variables,
+ std::vector<tree_node *> *compiled_param_types = nullptr)
+{
+ for (auto &parm : fntype->get_params ())
+ {
+ auto &referenced_param = parm.first;
+ auto &param_tyty = parm.second;
+ auto compiled_param_type = TyTyResolveCompile::compile (ctx, param_tyty);
+
+ Location param_locus = referenced_param->get_locus ();
+ Bvariable *compiled_param_var
+ = CompileFnParam::compile (ctx, fndecl, referenced_param,
+ compiled_param_type, param_locus);
+
+ compiled_param_variables->push_back (compiled_param_var);
+ if (compiled_param_types)
+ compiled_param_types->push_back (compiled_param_type);
+ }
+}
+
+static tree
+compile_intrinsic_function (Context *ctx, TyTy::FnType *fntype)
+{
+ maybe_override_ctx (fntype);
+
+ const Resolver::CanonicalPath &canonical_path = fntype->get_ident ().path;
+
+ tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype);
+ std::string ir_symbol_name
+ = canonical_path.get () + fntype->subst_as_string ();
+ std::string asm_name = ctx->mangle_item (fntype, canonical_path);
+
+ unsigned int flags = 0;
+ tree fndecl
+ = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name, asm_name,
+ flags, fntype->get_ident ().locus);
+
+ TREE_PUBLIC (fndecl) = 0;
+ TREE_READONLY (fndecl) = 1;
+ DECL_ARTIFICIAL (fndecl) = 1;
+ DECL_EXTERNAL (fndecl) = 0;
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+
+ return fndecl;
+}
+
+static void
+enter_intrinsic_block (Context *ctx, tree fndecl)
+{
+ tree enclosing_scope = NULL_TREE;
+ Location start_location = Location ();
+ Location end_location = Location ();
+
+ auto block = ctx->get_backend ()->block (fndecl, enclosing_scope, {},
+ start_location, end_location);
+
+ ctx->push_block (block);
+}
+
+static void
+finalize_intrinsic_block (Context *ctx, tree fndecl)
+{
+ tree bind_tree = ctx->pop_block ();
+
+ gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR);
+
+ DECL_SAVED_TREE (fndecl) = bind_tree;
+
+ ctx->push_function (fndecl);
+}
+
+static tree
+offset_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ // the offset intrinsic has two params: a dst pointer and an isize offset
+ rust_assert (fntype->get_params ().size () == 2);
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ auto &dst_param = param_vars.at (0);
+ auto &size_param = param_vars.at (1);
+ rust_assert (param_vars.size () == 2);
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN offset FN BODY BEGIN
+ tree dst = ctx->get_backend ()->var_expression (dst_param, Location ());
+ tree size = ctx->get_backend ()->var_expression (size_param, Location ());
+ tree pointer_offset_expr
+ = pointer_offset_expression (dst, size, BUILTINS_LOCATION);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {pointer_offset_expr},
+ Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN offset FN BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+sizeof_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ // size_of has _zero_ value parameters; its only parameter is the generic type
+ rust_assert (fntype->get_params ().size () == 0);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // get the template parameter type tree for fn size_of<T>();
+ rust_assert (fntype->get_num_substitutions () == 1);
+ auto &param_mapping = fntype->get_substs ().at (0);
+ const TyTy::ParamType *param_tyty = param_mapping.get_param_ty ();
+ TyTy::BaseType *resolved_tyty = param_tyty->resolve ();
+ tree template_parameter_type
+ = TyTyResolveCompile::compile (ctx, resolved_tyty);
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN size_of FN BODY BEGIN
+ tree size_expr = TYPE_SIZE_UNIT (template_parameter_type);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {size_expr}, Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN size_of FN BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+transmute_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ // transmute intrinsic has one parameter
+ rust_assert (fntype->get_params ().size () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ std::vector<Bvariable *> param_vars;
+ std::vector<tree_node *> compiled_types;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars, &compiled_types);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ // param to convert
+ Bvariable *convert_me_param = param_vars.at (0);
+ tree convert_me_expr
+ = ctx->get_backend ()->var_expression (convert_me_param, Location ());
+
+ // check for transmute pre-conditions
+ tree target_type_expr = TREE_TYPE (DECL_RESULT (fndecl));
+ tree source_type_expr = compiled_types.at (0);
+ tree target_size_expr = TYPE_SIZE (target_type_expr);
+ tree source_size_expr = TYPE_SIZE (source_type_expr);
+ // for some reason, unit types and other zero-sized types return NULL for the
+ // size expressions
+ unsigned HOST_WIDE_INT target_size
+ = target_size_expr ? TREE_INT_CST_LOW (target_size_expr) : 0;
+ unsigned HOST_WIDE_INT source_size
+ = source_size_expr ? TREE_INT_CST_LOW (source_size_expr) : 0;
+
+ // size check for concrete types
+ // TODO(liushuyu): check alignment for pointers; check for dependently-sized
+ // types
+ if (target_size != source_size)
+ {
+ rust_error_at (fntype->get_locus (),
+ "cannot transmute between types of different sizes, or "
+ "dependently-sized types");
+ rust_inform (fntype->get_ident ().locus, "source type: %qs (%lu bits)",
+ fntype->get_params ().at (0).second->as_string ().c_str (),
+ (unsigned long) source_size);
+ rust_inform (fntype->get_ident ().locus, "target type: %qs (%lu bits)",
+ fntype->get_return_type ()->as_string ().c_str (),
+ (unsigned long) target_size);
+ }
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN transmute FN BODY BEGIN
+
+ // Return *((target_type *) &convert_me_expr)
+
+ tree t
+ = build_fold_addr_expr_loc (Location ().gcc_location (), convert_me_expr);
+ t = fold_build1_loc (Location ().gcc_location (), NOP_EXPR,
+ build_pointer_type (target_type_expr), t);
+ tree result_expr
+ = build_fold_indirect_ref_loc (Location ().gcc_location (), t);
+
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {result_expr},
+ Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN transmute FN BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+rotate_handler (Context *ctx, TyTy::FnType *fntype, tree_code op)
+{
+ // the rotate intrinsics have two parameters
+ rust_assert (fntype->get_params ().size () == 2);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ auto &x_param = param_vars.at (0);
+ auto &y_param = param_vars.at (1);
+ rust_assert (param_vars.size () == 2);
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN rotate FN BODY BEGIN
+ tree x = ctx->get_backend ()->var_expression (x_param, Location ());
+ tree y = ctx->get_backend ()->var_expression (y_param, Location ());
+ tree rotate_expr
+ = fold_build2_loc (BUILTINS_LOCATION, op, TREE_TYPE (x), x, y);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {rotate_expr},
+ Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN rotate FN BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+/**
+ * pub fn wrapping_{add, sub, mul}<T>(lhs: T, rhs: T) -> T;
+ */
+static tree
+wrapping_op_handler (Context *ctx, TyTy::FnType *fntype, tree_code op)
+{
+ // wrapping_<op> intrinsics have two parameters
+ rust_assert (fntype->get_params ().size () == 2);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ auto &lhs_param = param_vars.at (0);
+ auto &rhs_param = param_vars.at (1);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN wrapping_<op> FN BODY BEGIN
+ auto lhs = ctx->get_backend ()->var_expression (lhs_param, Location ());
+ auto rhs = ctx->get_backend ()->var_expression (rhs_param, Location ());
+
+ // Operations are always wrapping in gccrs, as we have -fwrapv enabled by
+ // default. The difference between wrapping_{add, sub, mul} and a regular
+ // arithmetic operation is that these intrinsics never panic on overflow -
+ // the result simply wraps around.
+ auto wrap_expr = build2 (op, TREE_TYPE (lhs), lhs, rhs);
+
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {wrap_expr}, Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN wrapping_<op> FN BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+/**
+ * fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ */
+static tree
+copy_nonoverlapping_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ rust_assert (fntype->get_params ().size () == 3);
+ rust_assert (fntype->get_num_substitutions () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // Most intrinsic functions are pure - not `copy_nonoverlapping`
+ TREE_READONLY (fndecl) = 0;
+ TREE_SIDE_EFFECTS (fndecl) = 1;
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN copy_nonoverlapping BODY BEGIN
+
+ auto src = ctx->get_backend ()->var_expression (param_vars[0], Location ());
+ auto dst = ctx->get_backend ()->var_expression (param_vars[1], Location ());
+ auto count = ctx->get_backend ()->var_expression (param_vars[2], Location ());
+
+ // We want to create the following statement
+ // memcpy(dst, src, count * size_of::<T>());
+ // so
+ // memcpy(dst, src, size_expr);
+
+ auto *resolved_ty = fntype->get_substs ().at (0).get_param_ty ()->resolve ();
+ auto param_type = TyTyResolveCompile::compile (ctx, resolved_ty);
+
+ tree size_expr
+ = build2 (MULT_EXPR, size_type_node, TYPE_SIZE_UNIT (param_type), count);
+
+ tree memcpy_raw = nullptr;
+ BuiltinsContext::get ().lookup_simple_builtin ("memcpy", &memcpy_raw);
+ rust_assert (memcpy_raw);
+ auto memcpy
+ = build_fold_addr_expr_loc (Location ().gcc_location (), memcpy_raw);
+
+ auto copy_call
+ = ctx->get_backend ()->call_expression (memcpy, {dst, src, size_expr},
+ nullptr, Location ());
+
+ ctx->add_statement (copy_call);
+
+ // BUILTIN copy_nonoverlapping BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+} // namespace Compile
+} // namespace Rust
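
For context, a short stable-Rust sketch (editorial, not part of this patch) of the behaviour the generic intrinsic handlers above implement; the stable library calls shown here are thin wrappers over the corresponding intrinsics (size_of, rotate_left, wrapping_add, transmute, copy_nonoverlapping). Transmuting between types of different sizes is rejected at compile time, matching the size check in transmute_handler.

    use std::{mem, ptr};

    fn main() {
        let n = mem::size_of::<u64>();                      // size_of -> 8
        let r = 0x1u32.rotate_left(4);                      // rotate_left -> 0x10
        let w = i32::MAX.wrapping_add(1);                   // wrapping_add -> i32::MIN
        let bits: u32 = unsafe { mem::transmute(1.0f32) };  // transmute -> 0x3f800000
        let src = [1u8, 2, 3];
        let mut dst = [0u8; 3];
        unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 3) };
        println!("{} {} {} {:#x} {:?}", n, r, w, bits, dst);
    }
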
diff --git a/gcc/rust/backend/rust-compile-intrinsic.h b/gcc/rust/backend/rust-compile-intrinsic.h
new file mode 100644
index 00000000000..dceb0864fd4
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-intrinsic.h
@@ -0,0 +1,40 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_INTRINSIC
+#define RUST_COMPILE_INTRINSIC
+
+#include "rust-compile-context.h"
+#include "langhooks.h"
+
+namespace Rust {
+namespace Compile {
+
+class Intrinsics
+{
+public:
+ Intrinsics (Context *ctx);
+
+ tree compile (TyTy::FnType *fntype);
+
+private:
+ Context *ctx;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_INTRINSIC
diff --git a/gcc/rust/backend/rust-compile-item.cc b/gcc/rust/backend/rust-compile-item.cc
new file mode 100644
index 00000000000..ceba51c2d27
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-item.cc
@@ -0,0 +1,206 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-item.h"
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-extern.h"
+#include "rust-constexpr.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+CompileItem::visit (HIR::StaticItem &var)
+{
+ // have we already compiled this?
+ Bvariable *static_decl_ref = nullptr;
+ if (ctx->lookup_var_decl (var.get_mappings ().get_hirid (), &static_decl_ref))
+ {
+ reference
+ = ctx->get_backend ()->var_expression (static_decl_ref, ref_locus);
+ return;
+ }
+
+ TyTy::BaseType *resolved_type = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (var.get_mappings ().get_hirid (),
+ &resolved_type);
+ rust_assert (ok);
+
+ tree type = TyTyResolveCompile::compile (ctx, resolved_type);
+ tree value = CompileExpr::Compile (var.get_expr (), ctx);
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ ok = ctx->get_mappings ()->lookup_canonical_path (
+ var.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ std::string name = canonical_path->get ();
+ std::string asm_name = ctx->mangle_item (resolved_type, *canonical_path);
+
+ bool is_external = false;
+ bool is_hidden = false;
+ bool in_unique_section = true;
+
+ Bvariable *static_global
+ = ctx->get_backend ()->global_variable (name, asm_name, type, is_external,
+ is_hidden, in_unique_section,
+ var.get_locus ());
+ ctx->get_backend ()->global_variable_set_init (static_global, value);
+
+ ctx->insert_var_decl (var.get_mappings ().get_hirid (), static_global);
+ ctx->push_var (static_global);
+
+ reference = ctx->get_backend ()->var_expression (static_global, ref_locus);
+}
+
+void
+CompileItem::visit (HIR::ConstantItem &constant)
+{
+ if (ctx->lookup_const_decl (constant.get_mappings ().get_hirid (),
+ &reference))
+ return;
+
+ // resolve the type
+ TyTy::BaseType *resolved_type = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (constant.get_mappings ().get_hirid (),
+ &resolved_type);
+ rust_assert (ok);
+
+ // canonical path
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ ok = ctx->get_mappings ()->lookup_canonical_path (
+ constant.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ HIR::Expr *const_value_expr = constant.get_expr ();
+ ctx->push_const_context ();
+ tree const_expr
+ = compile_constant_item (ctx, resolved_type, canonical_path,
+ const_value_expr, constant.get_locus ());
+ ctx->pop_const_context ();
+
+ ctx->push_const (const_expr);
+ ctx->insert_const_decl (constant.get_mappings ().get_hirid (), const_expr);
+ reference = const_expr;
+}
+
+void
+CompileItem::visit (HIR::Function &function)
+{
+ TyTy::BaseType *fntype_tyty;
+ if (!ctx->get_tyctx ()->lookup_type (function.get_mappings ().get_hirid (),
+ &fntype_tyty))
+ {
+ rust_fatal_error (function.get_locus (),
+ "failed to lookup function type");
+ return;
+ }
+
+ rust_assert (fntype_tyty->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (fntype_tyty);
+ if (fntype->has_subsititions_defined ())
+ {
+ // we can't do anything with this yet; it is only compiled when it is used
+ // and a concrete type is given
+ if (concrete == nullptr)
+ return;
+ else
+ {
+ rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF);
+ fntype = static_cast<TyTy::FnType *> (concrete);
+ fntype->monomorphize ();
+ }
+ }
+
+ // items can be forward compiled which means we may not need to invoke this
+ // code. We might also have already compiled this generic function as well.
+ tree lookup = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup,
+ fntype->get_id (), fntype))
+ {
+ // Has this been added to the list? Then it must be finished
+ if (ctx->function_completed (lookup))
+ {
+ tree dummy = NULL_TREE;
+ if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy))
+ {
+ ctx->insert_function_decl (fntype, lookup);
+ }
+
+ reference = address_expression (lookup, ref_locus);
+ return;
+ }
+ }
+
+ if (fntype->has_subsititions_defined ())
+ {
+ // override the HIR lookups for the substitutions in this context
+ fntype->override_context ();
+ }
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ function.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ tree fndecl
+ = compile_function (ctx, function.get_function_name (),
+ function.get_self_param (),
+ function.get_function_params (),
+ function.get_qualifiers (), function.get_visibility (),
+ function.get_outer_attrs (), function.get_locus (),
+ function.get_definition ().get (), canonical_path,
+ fntype, function.has_function_return_type ());
+ reference = address_expression (fndecl, ref_locus);
+}
+
+void
+CompileItem::visit (HIR::ImplBlock &impl_block)
+{
+ TyTy::BaseType *self_lookup = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ impl_block.get_type ()->get_mappings ().get_hirid (), &self_lookup))
+ {
+ rust_error_at (impl_block.get_locus (), "failed to resolve type of impl");
+ return;
+ }
+
+ for (auto &impl_item : impl_block.get_impl_items ())
+ CompileInherentImplItem::Compile (impl_item.get (), ctx);
+}
+
+void
+CompileItem::visit (HIR::ExternBlock &extern_block)
+{
+ for (auto &item : extern_block.get_extern_items ())
+ {
+ CompileExternItem::compile (item.get (), ctx, concrete);
+ }
+}
+
+void
+CompileItem::visit (HIR::Module &module)
+{
+ for (auto &item : module.get_items ())
+ CompileItem::compile (item.get (), ctx);
+}
+
+} // namespace Compile
+} // namespace Rust
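
A brief Rust sketch (editorial, not part of this patch) of the items CompileItem walks, and of why generic functions are skipped until a concrete type is supplied: the visitor returns early for a generic definition and only emits code once monomorphization provides a concrete FnType.

    static BASE: i32 = 0;            // HIR::StaticItem
    const LIMIT: i32 = 10;           // HIR::ConstantItem

    fn identity<T>(x: T) -> T { x }  // generic: nothing emitted until it is used

    fn main() {
        let a = identity(LIMIT);     // forces identity::<i32> to be compiled
        let b = identity(1.5f64);    // and identity::<f64>
        println!("{} {} {}", BASE, a, b);
    }
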
diff --git a/gcc/rust/backend/rust-compile-item.h b/gcc/rust/backend/rust-compile-item.h
new file mode 100644
index 00000000000..3c12f1040fc
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-item.h
@@ -0,0 +1,88 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_ITEM
+#define RUST_COMPILE_ITEM
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileItem : private HIRCompileBase, protected HIR::HIRStmtVisitor
+{
+protected:
+public:
+ static tree compile (HIR::Item *item, Context *ctx,
+ TyTy::BaseType *concrete = nullptr,
+ bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+ void visit (HIR::StaticItem &var) override;
+ void visit (HIR::ConstantItem &constant) override;
+ void visit (HIR::Function &function) override;
+ void visit (HIR::ImplBlock &impl_block) override;
+ void visit (HIR::ExternBlock &extern_block) override;
+ void visit (HIR::Module &module) override;
+
+ // Empty visit for unused Stmt HIR nodes.
+ void visit (HIR::TupleStruct &) override {}
+ void visit (HIR::EnumItem &) override {}
+ void visit (HIR::EnumItemTuple &) override {}
+ void visit (HIR::EnumItemStruct &) override {}
+ void visit (HIR::EnumItemDiscriminant &) override {}
+ void visit (HIR::TypePathSegmentFunction &) override {}
+ void visit (HIR::TypePath &) override {}
+ void visit (HIR::QualifiedPathInType &) override {}
+ void visit (HIR::ExternCrate &) override {}
+ void visit (HIR::UseDeclaration &) override {}
+ void visit (HIR::TypeAlias &) override {}
+ void visit (HIR::StructStruct &) override {}
+ void visit (HIR::Enum &) override {}
+ void visit (HIR::Union &) override {}
+ void visit (HIR::Trait &) override {}
+ void visit (HIR::EmptyStmt &) override {}
+ void visit (HIR::LetStmt &) override {}
+ void visit (HIR::ExprStmtWithoutBlock &) override {}
+ void visit (HIR::ExprStmtWithBlock &) override {}
+
+protected:
+ CompileItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
+ : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
+ ref_locus (ref_locus)
+ {}
+
+ TyTy::BaseType *concrete;
+ tree reference;
+ Location ref_locus;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_ITEM
diff --git a/gcc/rust/backend/rust-compile-pattern.cc b/gcc/rust/backend/rust-compile-pattern.cc
new file mode 100644
index 00000000000..1d8eda1a577
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-pattern.cc
@@ -0,0 +1,333 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-pattern.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-resolve-path.h"
+#include "rust-constexpr.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::PathInExpression &pattern)
+{
+ // lookup the type
+ TyTy::BaseType *lookup = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (pattern.get_mappings ().get_hirid (),
+ &lookup);
+ rust_assert (ok);
+
+ // this must be an enum
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+ rust_assert (adt->is_enum ());
+
+ // lookup the variant
+ HirId variant_id;
+ ok = ctx->get_tyctx ()->lookup_variant_definition (
+ pattern.get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ TyTy::VariantDef *variant = nullptr;
+ ok = adt->lookup_variant_by_id (variant_id, &variant);
+ rust_assert (ok);
+
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree case_low = folded_discrim_expr;
+
+ case_label_expr
+ = build_case_label (case_low, NULL_TREE, associated_case_label);
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::StructPattern &pattern)
+{
+ CompilePatternCaseLabelExpr::visit (pattern.get_path ());
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::TupleStructPattern &pattern)
+{
+ CompilePatternCaseLabelExpr::visit (pattern.get_path ());
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::WildcardPattern &pattern)
+{
+ // operand 0 being NULL_TREE signifies this is the default case label; see
+ // tree.def for the documentation of CASE_LABEL_EXPR
+ case_label_expr
+ = build_case_label (NULL_TREE, NULL_TREE, associated_case_label);
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::LiteralPattern &pattern)
+{
+ // Compile the literal
+ HIR::LiteralExpr *litexpr
+ = new HIR::LiteralExpr (pattern.get_pattern_mappings (),
+ pattern.get_literal (), pattern.get_locus (),
+ std::vector<AST::Attribute> ());
+
+ // Note: Floating point literals are currently accepted but will likely be
+ // forbidden in LiteralPatterns in a future version of Rust.
+ // See: https://github.com/rust-lang/rust/issues/41620
+ // For now, we cannot compile them anyway as CASE_LABEL_EXPR does not support
+ // floating point types.
+ if (pattern.get_literal ().get_lit_type () == HIR::Literal::LitType::FLOAT)
+ {
+ rust_sorry_at (pattern.get_locus (), "floating-point literal in pattern");
+ }
+
+ tree lit = CompileExpr::Compile (litexpr, ctx);
+
+ case_label_expr = build_case_label (lit, NULL_TREE, associated_case_label);
+}
+
+static tree
+compile_range_pattern_bound (HIR::RangePatternBound *bound,
+ Analysis::NodeMapping mappings, Location locus,
+ Context *ctx)
+{
+ tree result = NULL_TREE;
+ switch (bound->get_bound_type ())
+ {
+ case HIR::RangePatternBound::RangePatternBoundType::LITERAL: {
+ HIR::RangePatternBoundLiteral &ref
+ = *static_cast<HIR::RangePatternBoundLiteral *> (bound);
+
+ HIR::LiteralExpr *litexpr
+ = new HIR::LiteralExpr (mappings, ref.get_literal (), locus,
+ std::vector<AST::Attribute> ());
+
+ result = CompileExpr::Compile (litexpr, ctx);
+ }
+ break;
+
+ case HIR::RangePatternBound::RangePatternBoundType::PATH: {
+ HIR::RangePatternBoundPath &ref
+ = *static_cast<HIR::RangePatternBoundPath *> (bound);
+
+ result = ResolvePathRef::Compile (ref.get_path (), ctx);
+
+ // If the path resolves to a const expression, fold it.
+ result = fold_expr (result);
+ }
+ break;
+
+ case HIR::RangePatternBound::RangePatternBoundType::QUALPATH: {
+ HIR::RangePatternBoundQualPath &ref
+ = *static_cast<HIR::RangePatternBoundQualPath *> (bound);
+
+ result = ResolvePathRef::Compile (ref.get_qualified_path (), ctx);
+
+ // If the path resolves to a const expression, fold it.
+ result = fold_expr (result);
+ }
+ }
+
+ return result;
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::RangePattern &pattern)
+{
+ tree upper = compile_range_pattern_bound (pattern.get_upper_bound ().get (),
+ pattern.get_pattern_mappings (),
+ pattern.get_locus (), ctx);
+ tree lower = compile_range_pattern_bound (pattern.get_lower_bound ().get (),
+ pattern.get_pattern_mappings (),
+ pattern.get_locus (), ctx);
+
+ case_label_expr = build_case_label (lower, upper, associated_case_label);
+}
+
+// setup the bindings
+
+void
+CompilePatternBindings::visit (HIR::TupleStructPattern &pattern)
+{
+ // lookup the type
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ pattern.get_path ().get_mappings ().get_hirid (), &lookup);
+ rust_assert (ok);
+
+ // this must be an enum
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+ rust_assert (adt->number_of_variants () > 0);
+
+ int variant_index = 0;
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ if (adt->is_enum ())
+ {
+ HirId variant_id = UNKNOWN_HIRID;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ pattern.get_path ().get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ ok = adt->lookup_variant_by_id (variant_id, &variant, &variant_index);
+ rust_assert (ok);
+ }
+
+ rust_assert (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::TUPLE);
+
+ std::unique_ptr<HIR::TupleStructItems> &items = pattern.get_items ();
+ switch (items->get_item_type ())
+ {
+ case HIR::TupleStructItems::RANGE: {
+ // TODO
+ gcc_unreachable ();
+ }
+ break;
+
+ case HIR::TupleStructItems::NO_RANGE: {
+ HIR::TupleStructItemsNoRange &items_no_range
+ = static_cast<HIR::TupleStructItemsNoRange &> (*items.get ());
+
+ rust_assert (items_no_range.get_patterns ().size ()
+ == variant->num_fields ());
+
+ if (adt->is_enum ())
+ {
+ // we are offsetting by + 1 here since the first field in the record
+ // is always the discriminator
+ size_t tuple_field_index = 1;
+ for (auto &pattern : items_no_range.get_patterns ())
+ {
+ tree variant_accessor
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, variant_index, pattern->get_locus ());
+
+ tree binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, tuple_field_index++, pattern->get_locus ());
+
+ ctx->insert_pattern_binding (
+ pattern->get_pattern_mappings ().get_hirid (), binding);
+ }
+ }
+ else
+ {
+ size_t tuple_field_index = 0;
+ for (auto &pattern : items_no_range.get_patterns ())
+ {
+ tree variant_accessor = match_scrutinee_expr;
+
+ tree binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, tuple_field_index++, pattern->get_locus ());
+
+ ctx->insert_pattern_binding (
+ pattern->get_pattern_mappings ().get_hirid (), binding);
+ }
+ }
+ }
+ break;
+ }
+}
+
+void
+CompilePatternBindings::visit (HIR::StructPattern &pattern)
+{
+ // lookup the type
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ pattern.get_path ().get_mappings ().get_hirid (), &lookup);
+ rust_assert (ok);
+
+ // this must be an enum
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+ rust_assert (adt->number_of_variants () > 0);
+
+ int variant_index = 0;
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ if (adt->is_enum ())
+ {
+ HirId variant_id = UNKNOWN_HIRID;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ pattern.get_path ().get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ ok = adt->lookup_variant_by_id (variant_id, &variant, &variant_index);
+ rust_assert (ok);
+ }
+
+ rust_assert (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::STRUCT);
+
+ auto &struct_pattern_elems = pattern.get_struct_pattern_elems ();
+ for (auto &field : struct_pattern_elems.get_struct_pattern_fields ())
+ {
+ switch (field->get_item_type ())
+ {
+ case HIR::StructPatternField::ItemType::TUPLE_PAT: {
+ // TODO
+ gcc_unreachable ();
+ }
+ break;
+
+ case HIR::StructPatternField::ItemType::IDENT_PAT: {
+ // TODO
+ gcc_unreachable ();
+ }
+ break;
+
+ case HIR::StructPatternField::ItemType::IDENT: {
+ HIR::StructPatternFieldIdent &ident
+ = static_cast<HIR::StructPatternFieldIdent &> (*field.get ());
+
+ size_t offs = 0;
+ ok
+ = variant->lookup_field (ident.get_identifier (), nullptr, &offs);
+ rust_assert (ok);
+
+ tree binding = error_mark_node;
+ if (adt->is_enum ())
+ {
+ tree variant_accessor
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, variant_index, ident.get_locus ());
+
+ // we are offsetting by + 1 here since the first field in the
+ // record is always the discriminator
+ binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, offs + 1, ident.get_locus ());
+ }
+ else
+ {
+ tree variant_accessor = match_scrutinee_expr;
+ binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, offs, ident.get_locus ());
+ }
+
+ ctx->insert_pattern_binding (ident.get_mappings ().get_hirid (),
+ binding);
+ }
+ break;
+ }
+ }
+}
+
+} // namespace Compile
+} // namespace Rust
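
To make the two pattern visitors concrete, a small Rust sketch (editorial, not part of this patch): each match arm's pattern becomes a CASE_LABEL_EXPR keyed on the variant's discriminant, and the arm's bindings become field accesses into the matched variant (offset by one past the discriminant field for enums).

    enum Shape {
        Circle(f64),                // tuple variant -> TupleStructPattern
        Rect { w: f64, h: f64 },    // struct variant -> StructPattern
        Empty,                      // data-less variant -> PathInExpression
    }

    fn area(s: Shape) -> f64 {
        match s {
            Shape::Circle(r) => 3.14 * r * r,  // case label + binding for the field
            Shape::Rect { w, h } => w * h,
            Shape::Empty => 0.0,
            // a `_ => ...` arm would become the default case label
        }
    }

    fn main() {
        println!("{}", area(Shape::Rect { w: 2.0, h: 3.0 }));
    }
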
diff --git a/gcc/rust/backend/rust-compile-pattern.h b/gcc/rust/backend/rust-compile-pattern.h
new file mode 100644
index 00000000000..0eb5d61249b
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-pattern.h
@@ -0,0 +1,95 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompilePatternCaseLabelExpr : public HIRCompileBase,
+ public HIR::HIRPatternVisitor
+{
+public:
+ static tree Compile (HIR::Pattern *pattern, tree associated_case_label,
+ Context *ctx)
+ {
+ CompilePatternCaseLabelExpr compiler (ctx, associated_case_label);
+ pattern->accept_vis (compiler);
+ return compiler.case_label_expr;
+ }
+
+ void visit (HIR::PathInExpression &pattern) override;
+ void visit (HIR::StructPattern &pattern) override;
+ void visit (HIR::TupleStructPattern &pattern) override;
+ void visit (HIR::WildcardPattern &pattern) override;
+ void visit (HIR::RangePattern &pattern) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::IdentifierPattern &) override {}
+ void visit (HIR::LiteralPattern &) override;
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+
+ CompilePatternCaseLabelExpr (Context *ctx, tree associated_case_label)
+ : HIRCompileBase (ctx), case_label_expr (error_mark_node),
+ associated_case_label (associated_case_label)
+ {}
+
+ tree case_label_expr;
+ tree associated_case_label;
+};
+
+class CompilePatternBindings : public HIRCompileBase,
+ public HIR::HIRPatternVisitor
+{
+public:
+ static void Compile (HIR::Pattern *pattern, tree match_scrutinee_expr,
+ Context *ctx)
+ {
+ CompilePatternBindings compiler (ctx, match_scrutinee_expr);
+ pattern->accept_vis (compiler);
+ }
+
+ void visit (HIR::StructPattern &pattern) override;
+ void visit (HIR::TupleStructPattern &pattern) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::IdentifierPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+ void visit (HIR::WildcardPattern &) override {}
+
+protected:
+ CompilePatternBindings (Context *ctx, tree match_scrutinee_expr)
+ : HIRCompileBase (ctx), match_scrutinee_expr (match_scrutinee_expr)
+ {}
+
+ tree match_scrutinee_expr;
+};
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-resolve-path.cc b/gcc/rust/backend/rust-compile-resolve-path.cc
new file mode 100644
index 00000000000..4fb3d540257
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-resolve-path.cc
@@ -0,0 +1,301 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-resolve-path.h"
+#include "rust-compile-intrinsic.h"
+#include "rust-compile-item.h"
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-hir-trait-resolve.h"
+#include "rust-hir-path-probe.h"
+#include "rust-compile-extern.h"
+#include "rust-constexpr.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+ResolvePathRef::visit (HIR::QualifiedPathInExpression &expr)
+{
+ resolved = resolve (expr.get_final_segment ().get_segment (),
+ expr.get_mappings (), expr.get_locus (), true);
+}
+
+void
+ResolvePathRef::visit (HIR::PathInExpression &expr)
+{
+ resolved = resolve (expr.get_final_segment ().get_segment (),
+ expr.get_mappings (), expr.get_locus (), false);
+}
+
+tree
+ResolvePathRef::resolve (const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings,
+ Location expr_locus, bool is_qualified_path)
+{
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (mappings.get_hirid (), &lookup);
+ rust_assert (ok);
+
+ // need to look up the reference for this identifier
+ NodeId ref_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_name (mappings.get_nodeid (),
+ &ref_node_id))
+ {
+ // this can fail because it might be a Constructor for something
+ // in that case the caller should attempt ResolvePathType::Compile
+
+ // it might be a data-less enum variant
+ if (lookup->get_kind () != TyTy::TypeKind::ADT)
+ return error_mark_node;
+
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+
+ // it might be a unit-struct
+ if (adt->is_unit ())
+ {
+ return ctx->get_backend ()->unit_expression ();
+ }
+
+ if (!adt->is_enum ())
+ return error_mark_node;
+
+ HirId variant_id;
+ if (!ctx->get_tyctx ()->lookup_variant_definition (mappings.get_hirid (),
+ &variant_id))
+ return error_mark_node;
+
+ int union_disriminator = -1;
+ TyTy::VariantDef *variant = nullptr;
+ if (!adt->lookup_variant_by_id (variant_id, &variant,
+ &union_disriminator))
+ return error_mark_node;
+
+ // this can only be for discriminant variants; the others are built up
+ // using call-expr or struct-init
+ rust_assert (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::NUM);
+
+ // we need the actual gcc type
+ tree compiled_adt_type = TyTyResolveCompile::compile (ctx, adt);
+
+ // make the ctor for the union
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree qualifier = folded_discrim_expr;
+
+ return ctx->get_backend ()->constructor_expression (compiled_adt_type,
+ true, {qualifier},
+ union_disriminator,
+ expr_locus);
+ }
+
+ HirId ref;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (ref_node_id, &ref))
+ {
+ rust_error_at (expr_locus, "reverse call path lookup failure");
+ return error_mark_node;
+ }
+
+ // might be a constant
+ tree constant_expr;
+ if (ctx->lookup_const_decl (ref, &constant_expr))
+ {
+ TREE_USED (constant_expr) = 1;
+ return constant_expr;
+ }
+
+ // this might be a variable reference or a function reference
+ Bvariable *var = nullptr;
+ if (ctx->lookup_var_decl (ref, &var))
+ {
+ // TREE_USED is setup in the gcc abstraction here
+ return ctx->get_backend ()->var_expression (var, expr_locus);
+ }
+
+ // might be a match pattern binding
+ tree binding = error_mark_node;
+ if (ctx->lookup_pattern_binding (ref, &binding))
+ {
+ TREE_USED (binding) = 1;
+ return binding;
+ }
+
+ // it might be a function call
+ if (lookup->get_kind () == TyTy::TypeKind::FNDEF)
+ {
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (lookup);
+ tree fn = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &fn))
+ {
+ TREE_USED (fn) = 1;
+ return address_expression (fn, expr_locus);
+ }
+ else if (fntype->get_abi () == ABI::INTRINSIC)
+ {
+ Intrinsics compile (ctx);
+ fn = compile.compile (fntype);
+ TREE_USED (fn) = 1;
+ return address_expression (fn, expr_locus);
+ }
+ }
+
+ // let the query system figure it out
+ tree resolved_item = query_compile (ref, lookup, final_segment, mappings,
+ expr_locus, is_qualified_path);
+ if (resolved_item != error_mark_node)
+ {
+ TREE_USED (resolved_item) = 1;
+ }
+ return resolved_item;
+}
+
+tree
+HIRCompileBase::query_compile (HirId ref, TyTy::BaseType *lookup,
+ const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings,
+ Location expr_locus, bool is_qualified_path)
+{
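+  // the reference may point at a plain HIR item, an extern item, an impl
+  // item or a trait item (possibly defaulting to the trait definition);
+  // try each of these in turn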
+ HIR::Item *resolved_item = ctx->get_mappings ()->lookup_hir_item (ref);
+ HirId parent_block;
+ HIR::ExternalItem *resolved_extern_item
+ = ctx->get_mappings ()->lookup_hir_extern_item (ref, &parent_block);
+ bool is_hir_item = resolved_item != nullptr;
+ bool is_hir_extern_item = resolved_extern_item != nullptr;
+ if (is_hir_item)
+ {
+ if (!lookup->has_subsititions_defined ())
+ return CompileItem::compile (resolved_item, ctx, nullptr, true,
+ expr_locus);
+ else
+ return CompileItem::compile (resolved_item, ctx, lookup, true,
+ expr_locus);
+ }
+ else if (is_hir_extern_item)
+ {
+ if (!lookup->has_subsititions_defined ())
+ return CompileExternItem::compile (resolved_extern_item, ctx, nullptr,
+ true, expr_locus);
+ else
+ return CompileExternItem::compile (resolved_extern_item, ctx, lookup,
+ true, expr_locus);
+ }
+ else
+ {
+ HirId parent_impl_id = UNKNOWN_HIRID;
+ HIR::ImplItem *resolved_item
+ = ctx->get_mappings ()->lookup_hir_implitem (ref, &parent_impl_id);
+ bool is_impl_item = resolved_item != nullptr;
+ if (is_impl_item)
+ {
+ rust_assert (parent_impl_id != UNKNOWN_HIRID);
+ HIR::Item *impl_ref
+ = ctx->get_mappings ()->lookup_hir_item (parent_impl_id);
+ rust_assert (impl_ref != nullptr);
+ HIR::ImplBlock *impl = static_cast<HIR::ImplBlock *> (impl_ref);
+
+ TyTy::BaseType *self = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ impl->get_type ()->get_mappings ().get_hirid (), &self);
+ rust_assert (ok);
+
+ if (!lookup->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (resolved_item, ctx,
+ nullptr, true, expr_locus);
+ else
+ return CompileInherentImplItem::Compile (resolved_item, ctx, lookup,
+ true, expr_locus);
+ }
+ else
+ {
+ // it might be resolved to a trait item
+ HIR::TraitItem *trait_item
+ = ctx->get_mappings ()->lookup_hir_trait_item (ref);
+ HIR::Trait *trait = ctx->get_mappings ()->lookup_trait_item_mapping (
+ trait_item->get_mappings ().get_hirid ());
+
+ Resolver::TraitReference *trait_ref
+ = &Resolver::TraitReference::error_node ();
+ bool ok = ctx->get_tyctx ()->lookup_trait_reference (
+ trait->get_mappings ().get_defid (), &trait_ref);
+ rust_assert (ok);
+
+ TyTy::BaseType *receiver = nullptr;
+ ok = ctx->get_tyctx ()->lookup_receiver (mappings.get_hirid (),
+ &receiver);
+ rust_assert (ok);
+
+ if (receiver->get_kind () == TyTy::TypeKind::PARAM)
+ {
+ TyTy::ParamType *p = static_cast<TyTy::ParamType *> (receiver);
+ receiver = p->resolve ();
+ }
+
+	  // the type resolver can only resolve type bounds to their trait
+	  // item, so it's up to us to figure out if this path should resolve
+	  // to a trait-impl-block-item or if it can be defaulted to the
+	  // trait-impl-item's definition
+ std::vector<Resolver::PathProbeCandidate> candidates
+ = Resolver::PathProbeImplTrait::Probe (receiver, final_segment,
+ trait_ref);
+ if (candidates.size () == 0)
+ {
+ // this means we are defaulting back to the trait_item if
+ // possible
+ Resolver::TraitItemReference *trait_item_ref = nullptr;
+ bool ok = trait_ref->lookup_hir_trait_item (*trait_item,
+ &trait_item_ref);
+ rust_assert (ok); // found
+ rust_assert (trait_item_ref->is_optional ()); // has definition
+
+ return CompileTraitItem::Compile (
+ trait_item_ref->get_hir_trait_item (), ctx, lookup, true,
+ expr_locus);
+ }
+ else
+ {
+ Resolver::PathProbeCandidate &candidate = candidates.at (0);
+ rust_assert (candidate.is_impl_candidate ());
+
+ HIR::ImplBlock *impl = candidate.item.impl.parent;
+ HIR::ImplItem *impl_item = candidate.item.impl.impl_item;
+
+ TyTy::BaseType *self = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ impl->get_type ()->get_mappings ().get_hirid (), &self);
+ rust_assert (ok);
+
+ if (!lookup->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (impl_item, ctx,
+ nullptr, true,
+ expr_locus);
+ else
+ return CompileInherentImplItem::Compile (impl_item, ctx, lookup,
+ true, expr_locus);
+
+ lookup->set_ty_ref (impl_item->get_impl_mappings ().get_hirid ());
+ }
+ }
+ }
+
+ return error_mark_node;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-resolve-path.h b/gcc/rust/backend/rust-compile-resolve-path.h
new file mode 100644
index 00000000000..f0360bdc739
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-resolve-path.h
@@ -0,0 +1,73 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_RESOLVE_PATH
+#define RUST_COMPILE_RESOLVE_PATH
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class ResolvePathRef : public HIRCompileBase, public HIR::HIRPatternVisitor
+{
+public:
+ static tree Compile (HIR::QualifiedPathInExpression &expr, Context *ctx)
+ {
+ ResolvePathRef resolver (ctx);
+ expr.accept_vis (resolver);
+ return resolver.resolved;
+ }
+
+ static tree Compile (HIR::PathInExpression &expr, Context *ctx)
+ {
+ ResolvePathRef resolver (ctx);
+ expr.accept_vis (resolver);
+ return resolver.resolved;
+ }
+
+ void visit (HIR::PathInExpression &expr) override;
+ void visit (HIR::QualifiedPathInExpression &expr) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::IdentifierPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::StructPattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+ void visit (HIR::TupleStructPattern &) override {}
+ void visit (HIR::WildcardPattern &) override {}
+
+ ResolvePathRef (Context *ctx)
+ : HIRCompileBase (ctx), resolved (error_mark_node)
+ {}
+
+ tree resolve (const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings, Location locus,
+ bool is_qualified_path);
+
+ tree resolved;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_RESOLVE_PATH
diff --git a/gcc/rust/backend/rust-compile-stmt.cc b/gcc/rust/backend/rust-compile-stmt.cc
new file mode 100644
index 00000000000..bfb25f12980
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-stmt.cc
@@ -0,0 +1,115 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-stmt.h"
+#include "rust-compile-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileStmt::CompileStmt (Context *ctx)
+ : HIRCompileBase (ctx), translated (nullptr)
+{}
+
+tree
+CompileStmt::Compile (HIR::Stmt *stmt, Context *ctx)
+{
+ CompileStmt compiler (ctx);
+ stmt->accept_vis (compiler);
+ return compiler.translated;
+}
+
+void
+CompileStmt::visit (HIR::ExprStmtWithBlock &stmt)
+{
+ translated = CompileExpr::Compile (stmt.get_expr (), ctx);
+}
+
+void
+CompileStmt::visit (HIR::ExprStmtWithoutBlock &stmt)
+{
+ translated = CompileExpr::Compile (stmt.get_expr (), ctx);
+}
+
+void
+CompileStmt::visit (HIR::LetStmt &stmt)
+{
+ // nothing to do
+ if (!stmt.has_init_expr ())
+ return;
+
+ const HIR::Pattern &stmt_pattern = *stmt.get_pattern ();
+ HirId stmt_id = stmt_pattern.get_pattern_mappings ().get_hirid ();
+
+ TyTy::BaseType *ty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (stmt_id, &ty))
+ {
+ // FIXME this should be an assertion instead
+ rust_fatal_error (stmt.get_locus (),
+ "failed to lookup variable declaration type");
+ return;
+ }
+
+ Bvariable *var = nullptr;
+ if (!ctx->lookup_var_decl (stmt_id, &var))
+ {
+ // FIXME this should be an assertion instead and use error mark node
+ rust_fatal_error (stmt.get_locus (),
+ "failed to lookup compiled variable declaration");
+ return;
+ }
+
+ tree init = CompileExpr::Compile (stmt.get_init_expr (), ctx);
+ // FIXME use error_mark_node, check that CompileExpr returns error_mark_node
+ // on failure and make this an assertion
+ if (init == nullptr)
+ return;
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ stmt.get_init_expr ()->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+ tree stmt_type = TyTyResolveCompile::compile (ctx, ty);
+
+ Location lvalue_locus = stmt.get_pattern ()->get_locus ();
+ Location rvalue_locus = stmt.get_init_expr ()->get_locus ();
+ TyTy::BaseType *expected = ty;
+ init = coercion_site (stmt.get_mappings ().get_hirid (), init, actual,
+ expected, lvalue_locus, rvalue_locus);
+
+ auto fnctx = ctx->peek_fn ();
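+  // unit-typed lets have no meaningful value: evaluate the initializer for
+  // its side effects only and then bind the variable to an empty constructor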
+ if (ty->is_unit ())
+ {
+ ctx->add_statement (init);
+
+ auto unit_type_init_expr
+ = ctx->get_backend ()->constructor_expression (stmt_type, false, {}, -1,
+ rvalue_locus);
+ auto s = ctx->get_backend ()->init_statement (fnctx.fndecl, var,
+ unit_type_init_expr);
+ ctx->add_statement (s);
+ }
+ else
+ {
+ auto s = ctx->get_backend ()->init_statement (fnctx.fndecl, var, init);
+ ctx->add_statement (s);
+ }
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-stmt.h b/gcc/rust/backend/rust-compile-stmt.h
new file mode 100644
index 00000000000..a0ec8b26667
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-stmt.h
@@ -0,0 +1,69 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_STMT
+#define RUST_COMPILE_STMT
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileStmt : private HIRCompileBase, protected HIR::HIRStmtVisitor
+{
+public:
+ static tree Compile (HIR::Stmt *stmt, Context *ctx);
+
+ void visit (HIR::ExprStmtWithBlock &stmt) override;
+ void visit (HIR::ExprStmtWithoutBlock &stmt) override;
+ void visit (HIR::LetStmt &stmt) override;
+
+ // Empty visit for unused Stmt HIR nodes.
+ void visit (HIR::TupleStruct &) override {}
+ void visit (HIR::EnumItem &) override {}
+ void visit (HIR::EnumItemTuple &) override {}
+ void visit (HIR::EnumItemStruct &) override {}
+ void visit (HIR::EnumItemDiscriminant &) override {}
+ void visit (HIR::TypePathSegmentFunction &) override {}
+ void visit (HIR::TypePath &) override {}
+ void visit (HIR::QualifiedPathInType &) override {}
+ void visit (HIR::Module &) override {}
+ void visit (HIR::ExternCrate &) override {}
+ void visit (HIR::UseDeclaration &) override {}
+ void visit (HIR::Function &) override {}
+ void visit (HIR::TypeAlias &) override {}
+ void visit (HIR::StructStruct &) override {}
+ void visit (HIR::Enum &) override {}
+ void visit (HIR::Union &) override {}
+ void visit (HIR::ConstantItem &) override {}
+ void visit (HIR::StaticItem &) override {}
+ void visit (HIR::Trait &) override {}
+ void visit (HIR::ImplBlock &) override {}
+ void visit (HIR::ExternBlock &) override {}
+ void visit (HIR::EmptyStmt &) override {}
+
+private:
+ CompileStmt (Context *ctx);
+
+ tree translated;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_STMT
diff --git a/gcc/rust/backend/rust-compile-struct-field-expr.cc b/gcc/rust/backend/rust-compile-struct-field-expr.cc
new file mode 100644
index 00000000000..c9a2811f611
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-struct-field-expr.cc
@@ -0,0 +1,81 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-struct-field-expr.h"
+#include "rust-compile-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileStructExprField::CompileStructExprField (Context *ctx)
+ : HIRCompileBase (ctx), translated (error_mark_node)
+{}
+
+tree
+CompileStructExprField::Compile (HIR::StructExprField *field, Context *ctx)
+{
+ CompileStructExprField compiler (ctx);
+ switch (field->get_kind ())
+ {
+ case HIR::StructExprField::StructExprFieldKind::IDENTIFIER:
+ compiler.visit (static_cast<HIR::StructExprFieldIdentifier &> (*field));
+ break;
+
+ case HIR::StructExprField::StructExprFieldKind::IDENTIFIER_VALUE:
+ compiler.visit (
+ static_cast<HIR::StructExprFieldIdentifierValue &> (*field));
+ break;
+
+ case HIR::StructExprField::StructExprFieldKind::INDEX_VALUE:
+ compiler.visit (static_cast<HIR::StructExprFieldIndexValue &> (*field));
+ break;
+ }
+ return compiler.translated;
+}
+
+void
+CompileStructExprField::visit (HIR::StructExprFieldIdentifierValue &field)
+{
+ translated = CompileExpr::Compile (field.get_value (), ctx);
+}
+
+void
+CompileStructExprField::visit (HIR::StructExprFieldIndexValue &field)
+{
+ translated = CompileExpr::Compile (field.get_value (), ctx);
+}
+
+void
+CompileStructExprField::visit (HIR::StructExprFieldIdentifier &field)
+{
+ // we can make the field look like a path expr to take advantage of existing
+ // code
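+  // e.g. the shorthand field `x` in `Foo { x }` is compiled as if the user
+  // had written `Foo { x: x }`: we build a one-segment PathInExpression for
+  // `x` and compile that instead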
+
+ Analysis::NodeMapping mappings_copy1 = field.get_mappings ();
+ Analysis::NodeMapping mappings_copy2 = field.get_mappings ();
+
+ HIR::PathIdentSegment ident_seg (field.get_field_name ());
+ HIR::PathExprSegment seg (mappings_copy1, ident_seg, field.get_locus (),
+ HIR::GenericArgs::create_empty ());
+ HIR::PathInExpression expr (mappings_copy2, {seg}, field.get_locus (), false,
+ {});
+ translated = CompileExpr::Compile (&expr, ctx);
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-struct-field-expr.h b/gcc/rust/backend/rust-compile-struct-field-expr.h
new file mode 100644
index 00000000000..bc5da080dfe
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-struct-field-expr.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_STRUCT_FIELD_EXPR
+#define RUST_COMPILE_STRUCT_FIELD_EXPR
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileStructExprField : private HIRCompileBase
+{
+public:
+ static tree Compile (HIR::StructExprField *field, Context *ctx);
+
+protected:
+ void visit (HIR::StructExprFieldIdentifierValue &field);
+ void visit (HIR::StructExprFieldIndexValue &field);
+ void visit (HIR::StructExprFieldIdentifier &field);
+
+private:
+ CompileStructExprField (Context *ctx);
+
+ tree translated;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_STRUCT_FIELD_EXPR
diff --git a/gcc/rust/backend/rust-compile-type.cc b/gcc/rust/backend/rust-compile-type.cc
new file mode 100644
index 00000000000..eced909673e
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-type.cc
@@ -0,0 +1,713 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-type.h"
+#include "rust-compile-expr.h"
+#include "rust-constexpr.h"
+
+#include "tree.h"
+
+namespace Rust {
+namespace Compile {
+
+static const std::string RUST_ENUM_DISR_FIELD_NAME = "RUST$ENUM$DISR";
+
+TyTyResolveCompile::TyTyResolveCompile (Context *ctx, bool trait_object_mode)
+ : ctx (ctx), trait_object_mode (trait_object_mode),
+    translated (error_mark_node), recursive_ops (0)
+{}
+
+tree
+TyTyResolveCompile::compile (Context *ctx, const TyTy::BaseType *ty,
+ bool trait_object_mode)
+{
+ TyTyResolveCompile compiler (ctx, trait_object_mode);
+ ty->accept_vis (compiler);
+
+ if (compiler.translated != error_mark_node
+ && TYPE_NAME (compiler.translated) != NULL)
+ {
+ // canonicalize the type
+ compiler.translated = ctx->insert_compiled_type (compiler.translated);
+ }
+
+ return compiler.translated;
+}
+
+// see: gcc/c/c-decl.cc:8230-8241
+// https://github.com/Rust-GCC/gccrs/blob/0024bc2f028369b871a65ceb11b2fddfb0f9c3aa/gcc/c/c-decl.c#L8229-L8241
+tree
+TyTyResolveCompile::get_implicit_enumeral_node_type (Context *ctx)
+{
+ // static tree enum_node = NULL_TREE;
+ // if (enum_node == NULL_TREE)
+ // {
+ // enum_node = make_node (ENUMERAL_TYPE);
+ // SET_TYPE_MODE (enum_node, TYPE_MODE (unsigned_type_node));
+ // SET_TYPE_ALIGN (enum_node, TYPE_ALIGN (unsigned_type_node));
+ // TYPE_USER_ALIGN (enum_node) = 0;
+ // TYPE_UNSIGNED (enum_node) = 1;
+ // TYPE_PRECISION (enum_node) = TYPE_PRECISION (unsigned_type_node);
+ // TYPE_MIN_VALUE (enum_node) = TYPE_MIN_VALUE (unsigned_type_node);
+ // TYPE_MAX_VALUE (enum_node) = TYPE_MAX_VALUE (unsigned_type_node);
+
+ // // tree identifier = ctx->get_backend ()->get_identifier_node
+ // // ("enumeral"); tree enum_decl
+ // // = build_decl (BUILTINS_LOCATION, TYPE_DECL, identifier,
+ // enum_node);
+ // // TYPE_NAME (enum_node) = enum_decl;
+ // }
+ // return enum_node;
+
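+  // for now the discriminant is modelled as a named 64-bit integer type
+  // rather than a real ENUMERAL_TYPE (see the disabled code above)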
+ static tree enum_node = NULL_TREE;
+ if (enum_node == NULL_TREE)
+ {
+ enum_node = ctx->get_backend ()->named_type (
+ "enumeral", ctx->get_backend ()->integer_type (false, 64),
+ Linemap::predeclared_location ());
+ }
+ return enum_node;
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ErrorType &)
+{
+ translated = error_mark_node;
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::InferType &)
+{
+ translated = error_mark_node;
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ClosureType &)
+{
+ gcc_unreachable ();
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ProjectionType &type)
+{
+ type.get ()->accept_vis (*this);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::PlaceholderType &type)
+{
+ type.resolve ()->accept_vis (*this);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ParamType &param)
+{
+  if (recursive_ops++ >= rust_max_recursion_depth)
+ {
+ rust_error_at (Location (),
+ "%<recursion depth%> count exceeds limit of %i (use "
+ "%<frust-max-recursion-depth=%> to increase the limit)",
+ rust_max_recursion_depth);
+ translated = error_mark_node;
+ return;
+ }
+
+ param.resolve ()->accept_vis (*this);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::FnType &type)
+{
+ Backend::typed_identifier receiver;
+ std::vector<Backend::typed_identifier> parameters;
+ std::vector<Backend::typed_identifier> results;
+
+ if (!type.get_return_type ()->is_unit ())
+ {
+ auto hir_type = type.get_return_type ();
+ auto ret = TyTyResolveCompile::compile (ctx, hir_type, trait_object_mode);
+ results.push_back (Backend::typed_identifier (
+ "_", ret,
+ ctx->get_mappings ()->lookup_location (hir_type->get_ref ())));
+ }
+
+ for (auto &param_pair : type.get_params ())
+ {
+ auto param_tyty = param_pair.second;
+ auto compiled_param_type
+ = TyTyResolveCompile::compile (ctx, param_tyty, trait_object_mode);
+
+ auto compiled_param = Backend::typed_identifier (
+ param_pair.first->as_string (), compiled_param_type,
+ ctx->get_mappings ()->lookup_location (param_tyty->get_ref ()));
+
+ parameters.push_back (compiled_param);
+ }
+
+ if (!type.is_varadic ())
+ translated
+ = ctx->get_backend ()->function_type (receiver, parameters, results, NULL,
+ type.get_ident ().locus);
+ else
+ translated
+ = ctx->get_backend ()->function_type_varadic (receiver, parameters,
+ results, NULL,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::FnPtr &type)
+{
+ tree result_type = TyTyResolveCompile::compile (ctx, type.get_return_type ());
+
+ std::vector<tree> parameters;
+
+ auto &params = type.get_params ();
+ for (auto &p : params)
+ {
+ tree pty = TyTyResolveCompile::compile (ctx, p.get_tyty ());
+ parameters.push_back (pty);
+ }
+
+ translated = ctx->get_backend ()->function_ptr_type (result_type, parameters,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ADTType &type)
+{
+ tree type_record = error_mark_node;
+ if (!type.is_enum ())
+ {
+ rust_assert (type.number_of_variants () == 1);
+
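+      // a struct or union carries exactly one variant; its fields are
+      // lowered one-to-one into a GCC record (or union) type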
+ TyTy::VariantDef &variant = *type.get_variants ().at (0);
+ std::vector<Backend::typed_identifier> fields;
+ for (size_t i = 0; i < variant.num_fields (); i++)
+ {
+ const TyTy::StructFieldType *field = variant.get_field_at_index (i);
+ tree compiled_field_ty
+ = TyTyResolveCompile::compile (ctx, field->get_field_type ());
+
+ Backend::typed_identifier f (field->get_name (), compiled_field_ty,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+ }
+
+ type_record = type.is_union ()
+ ? ctx->get_backend ()->union_type (fields)
+ : ctx->get_backend ()->struct_type (fields);
+ }
+ else
+ {
+ // see:
+ // https://github.com/bminor/binutils-gdb/blob/527b8861cd472385fa9160a91dd6d65a25c41987/gdb/dwarf2/read.c#L9010-L9241
+ //
+ // enums are actually a big union so for example the rust enum:
+ //
+ // enum AnEnum {
+ // A,
+ // B,
+ // C (char),
+ // D { x: i64, y: i64 },
+ // }
+ //
+ // we actually turn this into
+ //
+ // union {
+ // struct A { int RUST$ENUM$DISR; }; <- this is a data-less variant
+ // struct B { int RUST$ENUM$DISR; }; <- this is a data-less variant
+ // struct C { int RUST$ENUM$DISR; char __0; };
+ // struct D { int RUST$ENUM$DISR; i64 x; i64 y; };
+ // }
+ //
+      // Ada's qual_union_types might still work for this but I am not 100%
+      // sure. I ran into some issues, so let's reuse our normal union and
+      // ask the Ada people about it.
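+      // the RUST$ENUM$DISR member name is the one GDB's DWARF reader (see
+      // the link above) recognises for this encoding of Rust enums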
+
+ std::vector<tree> variant_records;
+ for (auto &variant : type.get_variants ())
+ {
+ std::vector<Backend::typed_identifier> fields;
+
+ // add in the qualifier field for the variant
+ tree enumeral_type
+ = TyTyResolveCompile::get_implicit_enumeral_node_type (ctx);
+ Backend::typed_identifier f (RUST_ENUM_DISR_FIELD_NAME, enumeral_type,
+ ctx->get_mappings ()->lookup_location (
+ variant->get_id ()));
+ fields.push_back (std::move (f));
+
+ // compile the rest of the fields
+ for (size_t i = 0; i < variant->num_fields (); i++)
+ {
+ const TyTy::StructFieldType *field
+ = variant->get_field_at_index (i);
+ tree compiled_field_ty
+ = TyTyResolveCompile::compile (ctx, field->get_field_type ());
+
+ std::string field_name = field->get_name ();
+ if (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::TUPLE)
+ field_name = "__" + field->get_name ();
+
+ Backend::typed_identifier f (
+ field_name, compiled_field_ty,
+ ctx->get_mappings ()->lookup_location (type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+ }
+
+ tree variant_record = ctx->get_backend ()->struct_type (fields);
+ tree named_variant_record = ctx->get_backend ()->named_type (
+ variant->get_ident ().path.get (), variant_record,
+ variant->get_ident ().locus);
+
+	  // mark the discriminant (qualifier) field as compiler-generated
+ DECL_ARTIFICIAL (TYPE_FIELDS (variant_record)) = 1;
+
+ // add them to the list
+ variant_records.push_back (named_variant_record);
+ }
+
+      // now make the actual union out of the named variant records
+
+ size_t i = 0;
+ std::vector<Backend::typed_identifier> enum_fields;
+ for (auto &variant_record : variant_records)
+ {
+ TyTy::VariantDef *variant = type.get_variants ().at (i++);
+ std::string implicit_variant_name = variant->get_identifier ();
+
+ Backend::typed_identifier f (implicit_variant_name, variant_record,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ enum_fields.push_back (std::move (f));
+ }
+
+ // finally make the union or the enum
+ type_record = ctx->get_backend ()->union_type (enum_fields);
+ }
+
+ // Handle repr options
+ // TODO: "packed" should only narrow type alignment and "align" should only
+ // widen it. Do we need to check and enforce this here, or is it taken care of
+ // later on in the gcc middle-end?
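+  // repr.pack and repr.align are byte values (hence the multiplication by 8
+  // below to get a bit alignment); e.g. #[repr(align(8))] is expected to
+  // yield repr.align == 8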
+ TyTy::ADTType::ReprOptions repr = type.get_repr_options ();
+ if (repr.pack)
+ {
+ TYPE_PACKED (type_record) = 1;
+ if (repr.pack > 1)
+ {
+ SET_TYPE_ALIGN (type_record, repr.pack * 8);
+ TYPE_USER_ALIGN (type_record) = 1;
+ }
+ }
+ else if (repr.align)
+ {
+ SET_TYPE_ALIGN (type_record, repr.align * 8);
+ TYPE_USER_ALIGN (type_record) = 1;
+ }
+
+ std::string named_struct_str
+ = type.get_ident ().path.get () + type.subst_as_string ();
+ translated = ctx->get_backend ()->named_type (named_struct_str, type_record,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::TupleType &type)
+{
+ if (type.num_fields () == 0)
+ {
+ translated = ctx->get_backend ()->unit_type ();
+ return;
+ }
+
+ // create implicit struct
+ std::vector<Backend::typed_identifier> fields;
+ for (size_t i = 0; i < type.num_fields (); i++)
+ {
+ TyTy::BaseType *field = type.get_field (i);
+ tree compiled_field_ty = TyTyResolveCompile::compile (ctx, field);
+
+ // rustc uses the convention __N, where N is an integer, to
+ // name the fields of a tuple. We follow this as well,
+ // because this is used by GDB. One further reason to prefer
+ // this, rather than simply emitting the integer, is that this
+ // approach makes it simpler to use a C-only debugger, or
+ // GDB's C mode, when debugging Rust.
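+      // For example, the tuple type (i32, f64) becomes an implicit struct
+      // with the fields __0: i32 and __1: f64.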
+ Backend::typed_identifier f ("__" + std::to_string (i), compiled_field_ty,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+ }
+
+ tree struct_type_record = ctx->get_backend ()->struct_type (fields);
+ translated
+ = ctx->get_backend ()->named_type (type.as_string (), struct_type_record,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ArrayType &type)
+{
+ tree element_type
+ = TyTyResolveCompile::compile (ctx, type.get_element_type ());
+ tree capacity_expr = CompileExpr::Compile (&type.get_capacity_expr (), ctx);
+ tree folded_capacity_expr = fold_expr (capacity_expr);
+
+ translated
+ = ctx->get_backend ()->array_type (element_type, folded_capacity_expr);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::SliceType &type)
+{
+ tree type_record = create_slice_type_record (type);
+
+ std::string named_struct_str
+ = std::string ("[") + type.get_element_type ()->get_name () + "]";
+ translated = ctx->get_backend ()->named_type (named_struct_str, type_record,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::BoolType &type)
+{
+ translated
+ = ctx->get_backend ()->named_type ("bool",
+ ctx->get_backend ()->bool_type (),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::IntType &type)
+{
+ switch (type.get_int_kind ())
+ {
+ case TyTy::IntType::I8:
+ translated = ctx->get_backend ()->named_type (
+ "i8", ctx->get_backend ()->integer_type (false, 8),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I16:
+ translated = ctx->get_backend ()->named_type (
+ "i16", ctx->get_backend ()->integer_type (false, 16),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I32:
+ translated = ctx->get_backend ()->named_type (
+ "i32", ctx->get_backend ()->integer_type (false, 32),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I64:
+ translated = ctx->get_backend ()->named_type (
+ "i64", ctx->get_backend ()->integer_type (false, 64),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I128:
+ translated = ctx->get_backend ()->named_type (
+ "i128", ctx->get_backend ()->integer_type (false, 128),
+ Linemap::predeclared_location ());
+ return;
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::UintType &type)
+{
+ switch (type.get_uint_kind ())
+ {
+ case TyTy::UintType::U8:
+ translated = ctx->get_backend ()->named_type (
+ "u8", ctx->get_backend ()->integer_type (true, 8),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U16:
+ translated = ctx->get_backend ()->named_type (
+ "u16", ctx->get_backend ()->integer_type (true, 16),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U32:
+ translated = ctx->get_backend ()->named_type (
+ "u32", ctx->get_backend ()->integer_type (true, 32),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U64:
+ translated = ctx->get_backend ()->named_type (
+ "u64", ctx->get_backend ()->integer_type (true, 64),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U128:
+ translated = ctx->get_backend ()->named_type (
+ "u128", ctx->get_backend ()->integer_type (true, 128),
+ Linemap::predeclared_location ());
+ return;
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::FloatType &type)
+{
+ switch (type.get_float_kind ())
+ {
+ case TyTy::FloatType::F32:
+ translated
+ = ctx->get_backend ()->named_type ("f32",
+ ctx->get_backend ()->float_type (32),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::FloatType::F64:
+ translated
+ = ctx->get_backend ()->named_type ("f64",
+ ctx->get_backend ()->float_type (64),
+ Linemap::predeclared_location ());
+ return;
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::USizeType &type)
+{
+ translated = ctx->get_backend ()->named_type (
+ "usize",
+ ctx->get_backend ()->integer_type (
+ true, ctx->get_backend ()->get_pointer_size ()),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ISizeType &type)
+{
+ translated = ctx->get_backend ()->named_type (
+ "isize",
+ ctx->get_backend ()->integer_type (
+ false, ctx->get_backend ()->get_pointer_size ()),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::CharType &type)
+{
+ translated
+ = ctx->get_backend ()->named_type ("char",
+ ctx->get_backend ()->wchar_type (),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ReferenceType &type)
+{
+ const TyTy::SliceType *slice = nullptr;
+ const TyTy::StrType *str = nullptr;
+ if (type.is_dyn_slice_type (&slice))
+ {
+ tree type_record = create_slice_type_record (*slice);
+ std::string dyn_slice_type_str
+ = std::string (type.is_mutable () ? "&mut " : "&") + "["
+ + slice->get_element_type ()->get_name () + "]";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_slice_type_str, type_record,
+ slice->get_locus ());
+
+ return;
+ }
+ else if (type.is_dyn_str_type (&str))
+ {
+ tree type_record = create_str_type_record (*str);
+ std::string dyn_str_type_str
+ = std::string (type.is_mutable () ? "&mut " : "&") + "str";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_str_type_str, type_record,
+ str->get_locus ());
+
+ return;
+ }
+
+ tree base_compiled_type
+ = TyTyResolveCompile::compile (ctx, type.get_base (), trait_object_mode);
+ if (type.is_mutable ())
+ {
+ translated = ctx->get_backend ()->reference_type (base_compiled_type);
+ }
+ else
+ {
+ auto base = ctx->get_backend ()->immutable_type (base_compiled_type);
+ translated = ctx->get_backend ()->reference_type (base);
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::PointerType &type)
+{
+ const TyTy::SliceType *slice = nullptr;
+ const TyTy::StrType *str = nullptr;
+ if (type.is_dyn_slice_type (&slice))
+ {
+ tree type_record = create_slice_type_record (*slice);
+ std::string dyn_slice_type_str
+ = std::string (type.is_mutable () ? "*mut " : "*const ") + "["
+ + slice->get_element_type ()->get_name () + "]";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_slice_type_str, type_record,
+ slice->get_locus ());
+
+ return;
+ }
+ else if (type.is_dyn_str_type (&str))
+ {
+ tree type_record = create_str_type_record (*str);
+ std::string dyn_str_type_str
+ = std::string (type.is_mutable () ? "*mut " : "*const ") + "str";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_str_type_str, type_record,
+ str->get_locus ());
+
+ return;
+ }
+
+ tree base_compiled_type
+ = TyTyResolveCompile::compile (ctx, type.get_base (), trait_object_mode);
+ if (type.is_mutable ())
+ {
+ translated = ctx->get_backend ()->pointer_type (base_compiled_type);
+ }
+ else
+ {
+ auto base = ctx->get_backend ()->immutable_type (base_compiled_type);
+ translated = ctx->get_backend ()->pointer_type (base);
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::StrType &type)
+{
+ tree raw_str = create_str_type_record (type);
+ translated
+ = ctx->get_backend ()->named_type ("str", raw_str,
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::NeverType &)
+{
+ translated = ctx->get_backend ()->unit_type ();
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::DynamicObjectType &type)
+{
+ if (trait_object_mode)
+ {
+ translated = ctx->get_backend ()->integer_type (
+ true, ctx->get_backend ()->get_pointer_size ());
+ return;
+ }
+
+ // create implicit struct
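+  // a trait object is lowered to the record { pointer, vtable }: an opaque
+  // data pointer plus an array holding one vtable slot per object item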
+ auto items = type.get_object_items ();
+ std::vector<Backend::typed_identifier> fields;
+
+ tree uint = ctx->get_backend ()->integer_type (
+ true, ctx->get_backend ()->get_pointer_size ());
+ tree uintptr_ty = build_pointer_type (uint);
+
+ Backend::typed_identifier f ("pointer", uintptr_ty,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+
+ tree vtable_size = build_int_cst (size_type_node, items.size ());
+ tree vtable_type = ctx->get_backend ()->array_type (uintptr_ty, vtable_size);
+ Backend::typed_identifier vtf ("vtable", vtable_type,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (vtf));
+
+ tree type_record = ctx->get_backend ()->struct_type (fields);
+ translated = ctx->get_backend ()->named_type (type.get_name (), type_record,
+ type.get_ident ().locus);
+}
+
+tree
+TyTyResolveCompile::create_slice_type_record (const TyTy::SliceType &type)
+{
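+  // a slice is lowered to a fat-pointer style record { data, len }, where
+  // data points at the element type and len is a usize
+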
+ // lookup usize
+ TyTy::BaseType *usize = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize);
+ rust_assert (ok);
+
+ tree element_type
+ = TyTyResolveCompile::compile (ctx, type.get_element_type ());
+ tree data_field_ty = build_pointer_type (element_type);
+ Backend::typed_identifier data_field ("data", data_field_ty,
+ type.get_locus ());
+
+ tree len_field_ty = TyTyResolveCompile::compile (ctx, usize);
+ Backend::typed_identifier len_field ("len", len_field_ty, type.get_locus ());
+
+ tree record = ctx->get_backend ()->struct_type ({data_field, len_field});
+ SLICE_FLAG (record) = 1;
+ TYPE_MAIN_VARIANT (record) = ctx->insert_main_variant (record);
+
+ return record;
+}
+
+tree
+TyTyResolveCompile::create_str_type_record (const TyTy::StrType &type)
+{
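+  // str gets the same { data, len } record layout as a slice
+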
+ // lookup usize
+ TyTy::BaseType *usize = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize);
+ rust_assert (ok);
+
+ tree char_ptr = build_pointer_type (char_type_node);
+ tree const_char_type = build_qualified_type (char_ptr, TYPE_QUAL_CONST);
+
+ tree element_type = const_char_type;
+ tree data_field_ty = build_pointer_type (element_type);
+ Backend::typed_identifier data_field ("data", data_field_ty,
+ type.get_locus ());
+
+ tree len_field_ty = TyTyResolveCompile::compile (ctx, usize);
+ Backend::typed_identifier len_field ("len", len_field_ty, type.get_locus ());
+
+ tree record = ctx->get_backend ()->struct_type ({data_field, len_field});
+ SLICE_FLAG (record) = 1;
+ TYPE_MAIN_VARIANT (record) = ctx->insert_main_variant (record);
+
+ return record;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-type.h b/gcc/rust/backend/rust-compile-type.h
new file mode 100644
index 00000000000..b52fd71bf6b
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-type.h
@@ -0,0 +1,79 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_TYPE
+#define RUST_COMPILE_TYPE
+
+#include "rust-compile-context.h"
+
+namespace Rust {
+namespace Compile {
+
+class TyTyResolveCompile : protected TyTy::TyConstVisitor
+{
+public:
+ static tree compile (Context *ctx, const TyTy::BaseType *ty,
+ bool trait_object_mode = false);
+
+ static tree get_implicit_enumeral_node_type (Context *ctx);
+
+ void visit (const TyTy::InferType &) override;
+ void visit (const TyTy::ADTType &) override;
+ void visit (const TyTy::TupleType &) override;
+ void visit (const TyTy::FnType &) override;
+ void visit (const TyTy::FnPtr &) override;
+ void visit (const TyTy::ArrayType &) override;
+ void visit (const TyTy::SliceType &) override;
+ void visit (const TyTy::BoolType &) override;
+ void visit (const TyTy::IntType &) override;
+ void visit (const TyTy::UintType &) override;
+ void visit (const TyTy::FloatType &) override;
+ void visit (const TyTy::USizeType &) override;
+ void visit (const TyTy::ISizeType &) override;
+ void visit (const TyTy::ErrorType &) override;
+ void visit (const TyTy::CharType &) override;
+ void visit (const TyTy::ReferenceType &) override;
+ void visit (const TyTy::PointerType &) override;
+ void visit (const TyTy::ParamType &) override;
+ void visit (const TyTy::StrType &) override;
+ void visit (const TyTy::NeverType &) override;
+ void visit (const TyTy::PlaceholderType &) override;
+ void visit (const TyTy::ProjectionType &) override;
+ void visit (const TyTy::DynamicObjectType &) override;
+ void visit (const TyTy::ClosureType &) override;
+
+public:
+ static hashval_t type_hasher (tree type);
+
+protected:
+ tree create_slice_type_record (const TyTy::SliceType &type);
+ tree create_str_type_record (const TyTy::StrType &type);
+
+private:
+ TyTyResolveCompile (Context *ctx, bool trait_object_mode);
+
+ Context *ctx;
+ bool trait_object_mode;
+ tree translated;
+  int recursive_ops;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_TYPE
diff --git a/gcc/rust/backend/rust-compile-var-decl.h b/gcc/rust/backend/rust-compile-var-decl.h
new file mode 100644
index 00000000000..e2ee05b8163
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-var-decl.h
@@ -0,0 +1,95 @@
+// Copyright (C) 2020-2022 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_VAR_DECL
+#define RUST_COMPILE_VAR_DECL
+
+#include "rust-compile-base.h"
+#include "rust-hir-visitor.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileVarDecl : public HIRCompileBase, public HIR::HIRPatternVisitor
+{
+ using HIR::HIRPatternVisitor::visit;
+
+public:
+ static ::Bvariable *compile (tree fndecl, tree translated_type,
+ HIR::Pattern *pattern, Context *ctx)
+ {
+ CompileVarDecl compiler (ctx, fndecl, translated_type);
+ pattern->accept_vis (compiler);
+ return compiler.compiled_variable;
+ }
+
+ void visit (HIR::IdentifierPattern &pattern) override
+ {
+ if (!pattern.is_mut ())
+ translated_type = ctx->get_backend ()->immutable_type (translated_type);
+
+ compiled_variable
+ = ctx->get_backend ()->local_variable (fndecl, pattern.get_identifier (),
+ translated_type, NULL /*decl_var*/,
+ pattern.get_locus ());
+
+ HirId stmt_id = pattern.get_pattern_mappings ().get_hirid ();
+ ctx->insert_var_decl (stmt_id, compiled_variable);
+ }
+
+ void visit (HIR::WildcardPattern &pattern) override
+ {
+ translated_type = ctx->get_backend ()->immutable_type (translated_type);
+
+ compiled_variable
+ = ctx->get_backend ()->local_variable (fndecl, "_", translated_type,
+ NULL /*decl_var*/,
+ pattern.get_locus ());
+
+ HirId stmt_id = pattern.get_pattern_mappings ().get_hirid ();
+ ctx->insert_var_decl (stmt_id, compiled_variable);
+ }
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::StructPattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+ void visit (HIR::TupleStructPattern &) override {}
+
+private:
+ CompileVarDecl (Context *ctx, tree fndecl, tree translated_type)
+ : HIRCompileBase (ctx), fndecl (fndecl), translated_type (translated_type),
+ compiled_variable (ctx->get_backend ()->error_variable ())
+ {}
+
+ tree fndecl;
+ tree translated_type;
+
+ Bvariable *compiled_variable;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_VAR_DECL