/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Processor architecture specification.
 * @author      Sebastian Hack
 */
25 #ifndef FIRM_BE_BEARCH_H
26 #define FIRM_BE_BEARCH_H
30 #include "firm_types.h"
32 #include "raw_bitset.h"
39 * this constant is returned by the get_sp_bias functions if the stack
40 * is reset (usually because the frame pointer is copied to the stack
43 #define SP_BIAS_RESET INT_MIN
45 typedef enum arch_register_class_flags_t {
46 arch_register_class_flag_none = 0,
47 /** don't do automatic register allocation for this class */
48 arch_register_class_flag_manual_ra = 1U << 0,
49 /** the register models an abstract state (example: fpu rounding mode) */
50 arch_register_class_flag_state = 1U << 1
51 } arch_register_class_flags_t;
52 ENUM_BITSET(arch_register_class_flags_t)
54 typedef enum arch_register_type_t {
55 arch_register_type_none = 0,
56 /** Do not consider this register when allocating. */
57 arch_register_type_ignore = 1U << 0,
58 /** The emitter can choose an arbitrary register. The register fulfills any
59 * register constraints as long as the register class matches */
60 arch_register_type_joker = 1U << 1,
61 /** This is just a virtual register. Virtual registers fulfill any register
62 * constraints as long as the register class matches. It is a allowed to
63 * have multiple definitions for the same virtual register at a point */
64 arch_register_type_virtual = 1U << 2,
65 /** The register represents a state that should be handled by bestate
67 arch_register_type_state = 1U << 3,
68 } arch_register_type_t;
69 ENUM_BITSET(arch_register_type_t)
72 * Different types of register allocation requirements.
74 typedef enum arch_register_req_type_t {
75 /** No register requirement. */
76 arch_register_req_type_none = 0,
77 /** All registers in the class are allowed. */
78 arch_register_req_type_normal = 1U << 0,
79 /** Only a real subset of the class is allowed. */
80 arch_register_req_type_limited = 1U << 1,
81 /** The register should be equal to another one at the node. */
82 arch_register_req_type_should_be_same = 1U << 2,
83 /** The register must be unequal from some other at the node. */
84 arch_register_req_type_must_be_different = 1U << 3,
85 /** The registernumber should be aligned (in case of multiregister values)*/
86 arch_register_req_type_aligned = 1U << 4,
87 /** ignore while allocating registers */
88 arch_register_req_type_ignore = 1U << 5,
89 /** the output produces a new value for the stack pointer
90 * (this is not really a constraint but a marker to guide the stackpointer
92 arch_register_req_type_produces_sp = 1U << 6,
93 } arch_register_req_type_t;
94 ENUM_BITSET(arch_register_req_type_t)
96 extern arch_register_req_t const arch_no_requirement;
97 #define arch_no_register_req (&arch_no_requirement)
100 * Print information about a register requirement in human readable form
101 * @param F output stream/file
102 * @param req The requirements structure to format.
104 void arch_dump_register_req(FILE *F, const arch_register_req_t *req,
105 const ir_node *node);
107 void arch_dump_register_reqs(FILE *F, const ir_node *node);
108 void arch_dump_reqs_and_registers(FILE *F, const ir_node *node);
110 void arch_set_frame_offset(ir_node *irn, int bias);
112 ir_entity *arch_get_frame_entity(const ir_node *irn);
113 int arch_get_sp_bias(ir_node *irn);
115 int arch_get_op_estimated_cost(const ir_node *irn);
116 arch_inverse_t *arch_get_inverse(const ir_node *irn, int i,
117 arch_inverse_t *inverse,
118 struct obstack *obstack);
119 int arch_possible_memory_operand(const ir_node *irn,
121 void arch_perform_memory_operand(ir_node *irn, ir_node *spill,
125 * Get the register allocated for a value.
127 const arch_register_t *arch_get_irn_register(const ir_node *irn);
130 * Assign register to a value
132 void arch_set_irn_register(ir_node *irn, const arch_register_t *reg);
135 * Set the register for a certain output operand.
137 void arch_set_irn_register_out(ir_node *irn, unsigned pos, const arch_register_t *r);
139 const arch_register_t *arch_get_irn_register_out(const ir_node *irn, unsigned pos);
140 const arch_register_t *arch_get_irn_register_in(const ir_node *irn, int pos);
143 * Get register constraints for an operand at position @p
145 static inline const arch_register_req_t *arch_get_irn_register_req_in(
146 const ir_node *node, int pos)
148 const backend_info_t *info = be_get_info(node);
149 return info->in_reqs[pos];
153 * Get register constraint for a produced result (the @p pos result)
155 static inline const arch_register_req_t *arch_get_irn_register_req_out(
156 const ir_node *node, unsigned pos)
158 const backend_info_t *info = be_get_info(node);
159 return info->out_infos[pos].req;
162 static inline void arch_set_irn_register_req_out(ir_node *node, unsigned pos,
163 const arch_register_req_t *req)
165 backend_info_t *info = be_get_info(node);
166 assert(pos < (unsigned)ARR_LEN(info->out_infos));
167 info->out_infos[pos].req = req;
170 static inline void arch_set_irn_register_reqs_in(ir_node *node,
171 const arch_register_req_t **reqs)
173 backend_info_t *info = be_get_info(node);
174 info->in_reqs = reqs;
177 static inline const arch_register_req_t **arch_get_irn_register_reqs_in(
180 backend_info_t *info = be_get_info(node);
181 return info->in_reqs;
184 const arch_register_req_t *arch_get_irn_register_req(const ir_node *node);
187 * Get the flags of a node.
188 * @param irn The node.
191 arch_irn_flags_t arch_get_irn_flags(const ir_node *irn);
193 void arch_set_irn_flags(ir_node *node, arch_irn_flags_t flags);
194 void arch_add_irn_flags(ir_node *node, arch_irn_flags_t flags);
196 #define arch_irn_is(irn, flag) ((arch_get_irn_flags(irn) & arch_irn_flags_ ## flag) != 0)
198 static inline unsigned arch_get_irn_n_outs(const ir_node *node)
200 backend_info_t *info = be_get_info(node);
201 if (info->out_infos == NULL)
204 return (unsigned)ARR_LEN(info->out_infos);
208 * Start codegeneration
210 arch_env_t *arch_env_begin_codegeneration(const arch_isa_if_t *isa,
211 be_main_env_t *main_env);
214 * Register an instruction set architecture
216 void be_register_isa_if(const char *name, const arch_isa_if_t *isa);
221 struct arch_register_t {
222 const char *name; /**< The name of the register. */
223 const arch_register_class_t *reg_class; /**< The class of the register */
224 unsigned short index; /**< The index of the register in
226 unsigned short global_index; /**< The global index this
227 register in the architecture. */
228 arch_register_type_t type; /**< The type of the register. */
229 /** register constraint allowing just this register */
230 const arch_register_req_t *single_req;
231 /** register number in dwarf debugging format */
232 unsigned short dwarf_number;
236 * A class of registers.
237 * Like general purpose or floating point.
239 struct arch_register_class_t {
240 unsigned index; /**< index of this register class */
241 const char *name; /**< The name of the register class.*/
242 unsigned n_regs; /**< Number of registers in this
244 ir_mode *mode; /**< The mode of the register class.*/
245 const arch_register_t *regs; /**< The array of registers. */
246 arch_register_class_flags_t flags; /**< register class flags. */
247 const arch_register_req_t *class_req;
/** return the number of registers in this register class */
#define arch_register_class_n_regs(cls) ((cls)->n_regs)

/** return the largest mode of this register class */
#define arch_register_class_mode(cls) ((cls)->mode)

/** return the name of this register class */
#define arch_register_class_name(cls) ((cls)->name)

/** return the index of this register class */
#define arch_register_class_index(cls) ((cls)->index)

/** return the register class flags */
#define arch_register_class_flags(cls) ((cls)->flags)
265 static inline const arch_register_t *arch_register_for_index(
266 const arch_register_class_t *cls, unsigned idx)
268 assert(idx < cls->n_regs);
269 return &cls->regs[idx];
/**
 * Convenience macro to check for set constraints.
 * @param req  A pointer to register requirements.
 * @param kind The kind of constraint to check for
 *             (see arch_register_req_type_t).
 * @return     1, If the kind of constraint is present, 0 if not.
 */
#define arch_register_req_is(req, kind) \
	(((req)->type & (arch_register_req_type_ ## kind)) != 0)
283 * Expresses requirements to register allocation for an operand.
285 struct arch_register_req_t {
286 arch_register_req_type_t type; /**< The type of the constraint. */
287 const arch_register_class_t *cls; /**< The register class this constraint
289 const unsigned *limited; /**< allowed register bitset
290 (in case of wide-values this is
291 only about the first register) */
292 unsigned other_same; /**< Bitmask of ins which should use the
293 same register (should_be_same). */
294 unsigned other_different; /**< Bitmask of ins which shall use a
296 (must_be_different) */
297 unsigned char width; /**< specifies how many sequential
298 registers are required */
301 static inline bool reg_reqs_equal(const arch_register_req_t *req1,
302 const arch_register_req_t *req2)
307 if (req1->type != req2->type ||
308 req1->cls != req2->cls ||
309 req1->other_same != req2->other_same ||
310 req1->other_different != req2->other_different ||
311 (req1->limited != NULL) != (req2->limited != NULL))
314 if (req1->limited != NULL) {
315 size_t const n_regs = arch_register_class_n_regs(req1->cls);
316 if (!rbitsets_equal(req1->limited, req2->limited, n_regs))
324 * An inverse operation returned by the backend
326 struct arch_inverse_t {
327 int n; /**< count of nodes returned in nodes array */
328 int costs; /**< costs of this remat */
330 /** nodes for this inverse operation. shall be in schedule order.
331 * last element is the target value */
335 struct arch_irn_ops_t {
338 * Get the entity on the stack frame this node depends on.
339 * @param irn The node in question.
340 * @return The entity on the stack frame or NULL, if the node does not have
341 * a stack frame entity.
343 ir_entity *(*get_frame_entity)(const ir_node *irn);
346 * Set the offset of a node carrying an entity on the stack frame.
347 * @param irn The node.
348 * @param offset The offset of the node's stack frame entity.
350 void (*set_frame_offset)(ir_node *irn, int offset);
353 * Returns the delta of the stackpointer for nodes that increment or
354 * decrement the stackpointer with a constant value. (push, pop
355 * nodes on most architectures).
356 * A positive value stands for an expanding stack area, a negative value for
359 * @param irn The node
360 * @return 0 if the stackpointer is not modified with a constant
361 * value, otherwise the increment/decrement value
363 int (*get_sp_bias)(const ir_node *irn);
366 * Returns an inverse operation which yields the i-th argument
367 * of the given node as result.
369 * @param irn The original operation
370 * @param i Index of the argument we want the inverse operation to
372 * @param inverse struct to be filled with the resulting inverse op
373 * @param obstack The obstack to use for allocation of the returned nodes
375 * @return The inverse operation or NULL if operation invertible
377 arch_inverse_t *(*get_inverse)(const ir_node *irn, int i,
378 arch_inverse_t *inverse,
379 struct obstack *obstack);
382 * Get the estimated cycle count for @p irn.
384 * @param irn The node.
385 * @return The estimated cycle count for this operation
387 int (*get_op_estimated_cost)(const ir_node *irn);
390 * Asks the backend whether operand @p i of @p irn can be loaded form memory
393 * @param irn The node.
394 * @param i Index of the argument we would like to know whether @p irn
395 * can load it form memory internally
396 * @return nonzero if argument can be loaded or zero otherwise
398 int (*possible_memory_operand)(const ir_node *irn, unsigned int i);
401 * Ask the backend to assimilate @p reload of operand @p i into @p irn.
403 * @param irn The node.
404 * @param spill The spill.
405 * @param i The position of the reload.
407 void (*perform_memory_operand)(ir_node *irn, ir_node *spill,
412 * Architecture interface.
414 struct arch_isa_if_t {
416 * Initializes the isa interface. This is necessary before calling any
417 * other functions from this interface.
422 * Fress resources allocated by this isa interface.
424 void (*finish)(void);
427 * Returns the frontend settings needed for this backend.
429 const backend_params *(*get_params)(void);
432 * lowers current program for target. See the documentation for
433 * be_lower_for_target() for details.
435 void (*lower_for_target)(void);
438 * parse an assembler constraint part and set flags according to its nature
439 * advances the *c pointer to point to the last parsed character (so if you
440 * parse a single character don't advance c)
442 asm_constraint_flags_t (*parse_asm_constraint)(const char **c);
445 * returns true if the string is a valid clobbered (register) in this
448 int (*is_valid_clobber)(const char *clobber);
451 * Start codegeneration
452 * @return a new isa instance
454 arch_env_t *(*begin_codegeneration)(const be_main_env_t *env);
457 * Free the isa instance.
459 void (*end_codegeneration)(void *self);
462 * Initialize the code generator for a graph
465 void (*init_graph)(ir_graph *irg);
468 * Get the ABI restrictions for procedure calls.
469 * @param call_type The call type of the method (procedure) in question.
470 * @param p The array of parameter locations to be filled.
472 void (*get_call_abi)(ir_type *call_type, be_abi_call_t *abi);
475 * mark node as rematerialized
477 void (*mark_remat)(ir_node *node);
480 * return node used as base in pic code addresses
482 ir_node* (*get_pic_base)(ir_graph *irg);
485 * Create a spill instruction. We assume that spill instructions
486 * do not need any additional registers and do not affect cpu-flags in any
488 * Construct a sequence of instructions after @p after (the resulting nodes
489 * are already scheduled).
490 * Returns a mode_M value which is used as input for a reload instruction.
492 ir_node *(*new_spill)(ir_node *value, ir_node *after);
495 * Create a reload instruction. We assume that reload instructions do not
496 * need any additional registers and do not affect cpu-flags in any way.
497 * Constructs a sequence of instruction before @p before (the resulting
498 * nodes are already scheduled). A rewiring of users is not performed in
500 * Returns a value representing the restored value.
502 ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
506 * Checks if the given register is callee/caller saved.
507 * @deprecated, only necessary if backend still uses beabi functions
509 int (*register_saved_by)(const arch_register_t *reg, int callee);
512 * Called directly after initialization. Backend should handle all
515 void (*handle_intrinsics)(void);
518 * Called before abi introduce.
520 void (*before_abi)(ir_graph *irg);
523 * Called, when the graph is being normalized.
525 void (*prepare_graph)(ir_graph *irg);
528 * Called before register allocation.
530 void (*before_ra)(ir_graph *irg);
533 * Called directly before done is called. This should be the last place
534 * where the irg is modified.
536 void (*finish_graph)(ir_graph *irg);
539 * Called after everything happened. This call should emit the final
540 * assembly code but avoid changing the irg.
542 void (*emit)(ir_graph *irg);
/* Convenience dispatch macros for the isa interface.
 * Fixed: arch_env_parse_asm_constraint and arch_env_is_valid_clobber were
 * missing a closing parenthesis each. */
#define arch_env_end_codegeneration(env)       ((env)->impl->end_codegeneration(env))
#define arch_env_handle_intrinsics(env) \
	do { if ((env)->impl->handle_intrinsics != NULL) (env)->impl->handle_intrinsics(); } while (0)
#define arch_env_get_call_abi(env,tp,abi)      ((env)->impl->get_call_abi((tp), (abi)))
#define arch_env_get_params(env)               ((env)->impl->get_params())
#define arch_env_parse_asm_constraint(env,c)   ((env)->impl->parse_asm_constraint((c)))
#define arch_env_is_valid_clobber(env,clobber) ((env)->impl->is_valid_clobber((clobber)))
#define arch_env_mark_remat(env,node) \
	do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((node)); } while (0)

#define arch_env_new_spill(env,value,after)           ((env)->impl->new_spill(value, after))
#define arch_env_new_reload(env,value,spilled,before) ((env)->impl->new_reload(value, spilled, before))
562 const arch_isa_if_t *impl;
563 unsigned n_registers; /**< number of registers */
564 const arch_register_t *registers; /**< register array */
565 unsigned n_register_classes; /**< number of register classes*/
566 const arch_register_class_t *register_classes; /**< register classes */
567 const arch_register_t *sp; /**< The stack pointer register. */
568 const arch_register_t *bp; /**< The base pointer register. */
569 int stack_alignment; /**< power of 2 stack alignment */
570 const be_main_env_t *main_env; /**< the be main environment */
571 int spill_cost; /**< cost for a be_Spill node */
572 int reload_cost; /**< cost for a be_Reload node */
573 bool custom_abi : 1; /**< backend does all abi handling
574 and does not need the generic
575 stuff from beabi.h/.c */
578 static inline bool arch_irn_is_ignore(const ir_node *irn)
580 const arch_register_req_t *req = arch_get_irn_register_req(irn);
581 return req->type & arch_register_req_type_ignore;
584 static inline bool arch_irn_consider_in_reg_alloc(
585 const arch_register_class_t *cls, const ir_node *node)
587 const arch_register_req_t *req = arch_get_irn_register_req(node);
590 !(req->type & arch_register_req_type_ignore);
/**
 * Iterate over all values defined by an instruction.
 * Only looks at values in a certain register class where the requirements
 * are not marked as ignore.
 * Executes @p code for each definition.
 */
#define be_foreach_definition_(node, ccls, value, code) \
	do { \
		if (get_irn_mode(node) == mode_T) { \
			foreach_out_edge(node, edge_) { \
				ir_node *const value = get_edge_src_irn(edge_); \
				arch_register_req_t const *const req_ = arch_get_irn_register_req(value); \
				if (req_->cls != ccls) \
					continue; \
				code \
			} \
		} else { \
			arch_register_req_t const *const req_ = arch_get_irn_register_req(node); \
			ir_node *const value = node; \
			if (req_->cls == ccls) { \
				code \
			} \
		} \
	} while (0)
/** Like be_foreach_definition_() but additionally skips values whose
 * requirement carries the ignore flag. */
#define be_foreach_definition(node, ccls, value, code) \
	be_foreach_definition_(node, ccls, value, \
		if (req_->type & arch_register_req_type_ignore) \
			continue; \
		code \
	)
625 static inline const arch_register_class_t *arch_get_irn_reg_class(
628 const arch_register_req_t *req = arch_get_irn_register_req(node);
632 bool arch_reg_is_allocatable(const arch_register_req_t *req,
633 const arch_register_t *reg);