/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief       Processor architecture specification.
 * @author      Sebastian Hack
 */
#ifndef FIRM_BE_BEARCH_H
#define FIRM_BE_BEARCH_H

#include "firm_types.h"
#include "raw_bitset.h"
/**
 * this constant is returned by the get_sp_bias functions if the stack
 * is reset (usually because the frame pointer is copied to the stack
 * pointer)
 */
#define SP_BIAS_RESET INT_MIN
44 typedef enum arch_register_class_flags_t {
45 arch_register_class_flag_none = 0,
46 /** don't do automatic register allocation for this class */
47 arch_register_class_flag_manual_ra = 1U << 0,
48 /** the register models an abstract state (example: fpu rounding mode) */
49 arch_register_class_flag_state = 1U << 1
50 } arch_register_class_flags_t;
51 ENUM_BITSET(arch_register_class_flags_t)
53 typedef enum arch_register_type_t {
54 arch_register_type_none = 0,
55 /** Do not consider this register when allocating. */
56 arch_register_type_ignore = 1U << 0,
57 /** This is just a virtual register. Virtual registers fulfill any register
58 * constraints as long as the register class matches. It is a allowed to
59 * have multiple definitions for the same virtual register at a point */
60 arch_register_type_virtual = 1U << 1,
61 /** The register represents a state that should be handled by bestate
63 arch_register_type_state = 1U << 2,
64 } arch_register_type_t;
65 ENUM_BITSET(arch_register_type_t)
68 * Different types of register allocation requirements.
70 typedef enum arch_register_req_type_t {
71 /** No register requirement. */
72 arch_register_req_type_none = 0,
73 /** All registers in the class are allowed. */
74 arch_register_req_type_normal = 1U << 0,
75 /** Only a real subset of the class is allowed. */
76 arch_register_req_type_limited = 1U << 1,
77 /** The register should be equal to another one at the node. */
78 arch_register_req_type_should_be_same = 1U << 2,
79 /** The register must be unequal from some other at the node. */
80 arch_register_req_type_must_be_different = 1U << 3,
81 /** The registernumber should be aligned (in case of multiregister values)*/
82 arch_register_req_type_aligned = 1U << 4,
83 /** ignore while allocating registers */
84 arch_register_req_type_ignore = 1U << 5,
85 /** the output produces a new value for the stack pointer
86 * (this is not really a constraint but a marker to guide the stackpointer
88 arch_register_req_type_produces_sp = 1U << 6,
89 } arch_register_req_type_t;
90 ENUM_BITSET(arch_register_req_type_t)
extern arch_register_req_t const arch_no_requirement;
#define arch_no_register_req (&arch_no_requirement)
void arch_dump_register_reqs(FILE *F, const ir_node *node);
void arch_dump_reqs_and_registers(FILE *F, const ir_node *node);

void arch_set_frame_offset(ir_node *irn, int bias);

ir_entity *arch_get_frame_entity(const ir_node *irn);
int        arch_get_sp_bias(ir_node *irn);

int  arch_get_op_estimated_cost(const ir_node *irn);
int  arch_possible_memory_operand(const ir_node *irn,
                                  unsigned int i);
void arch_perform_memory_operand(ir_node *irn, ir_node *spill,
                                 unsigned int i);

/**
 * Get the register allocated for a value.
 */
const arch_register_t *arch_get_irn_register(const ir_node *irn);

/**
 * Assign register to a value
 */
void arch_set_irn_register(ir_node *irn, const arch_register_t *reg);

/**
 * Set the register for a certain output operand.
 */
void arch_set_irn_register_out(ir_node *irn, unsigned pos, const arch_register_t *r);

const arch_register_t *arch_get_irn_register_out(const ir_node *irn, unsigned pos);
const arch_register_t *arch_get_irn_register_in(const ir_node *irn, int pos);
128 * Get register constraints for an operand at position @p
130 static inline const arch_register_req_t *arch_get_irn_register_req_in(
131 const ir_node *node, int pos)
133 const backend_info_t *info = be_get_info(node);
134 return info->in_reqs[pos];
138 * Get register constraint for a produced result (the @p pos result)
140 static inline const arch_register_req_t *arch_get_irn_register_req_out(
141 const ir_node *node, unsigned pos)
143 const backend_info_t *info = be_get_info(node);
144 return info->out_infos[pos].req;
147 static inline void arch_set_irn_register_req_out(ir_node *node, unsigned pos,
148 const arch_register_req_t *req)
150 backend_info_t *info = be_get_info(node);
151 assert(pos < (unsigned)ARR_LEN(info->out_infos));
152 info->out_infos[pos].req = req;
155 static inline void arch_set_irn_register_reqs_in(ir_node *node,
156 const arch_register_req_t **reqs)
158 backend_info_t *info = be_get_info(node);
159 info->in_reqs = reqs;
162 static inline const arch_register_req_t **arch_get_irn_register_reqs_in(
165 backend_info_t *info = be_get_info(node);
166 return info->in_reqs;
169 static inline reg_out_info_t *get_out_info(const ir_node *node)
172 const backend_info_t *info;
173 assert(get_irn_mode(node) != mode_T);
175 pos = get_Proj_proj(node);
176 node = get_Proj_pred(node);
179 info = be_get_info(node);
180 assert(pos < ARR_LEN(info->out_infos));
181 return &info->out_infos[pos];
184 static inline const arch_register_req_t *arch_get_irn_register_req(const ir_node *node)
186 reg_out_info_t *out = get_out_info(node);
191 * Get the flags of a node.
192 * @param irn The node.
195 static inline arch_irn_flags_t arch_get_irn_flags(const ir_node *node)
197 backend_info_t const *const info = be_get_info(node);
void arch_set_irn_flags(ir_node *node, arch_irn_flags_t flags);
void arch_add_irn_flags(ir_node *node, arch_irn_flags_t flags);

#define arch_irn_is(irn, flag) ((arch_get_irn_flags(irn) & arch_irn_flags_ ## flag) != 0)
206 static inline unsigned arch_get_irn_n_outs(const ir_node *node)
208 backend_info_t *const info = be_get_info(node);
209 return (unsigned)ARR_LEN(info->out_infos);
/** Iterate over all output positions of @p node. */
#define be_foreach_out(node, i) \
	for (unsigned i = 0, i##__n = arch_get_irn_n_outs(node); i != i##__n; ++i)

/**
 * Register an instruction set architecture
 */
void be_register_isa_if(const char *name, const arch_isa_if_t *isa);
223 struct arch_register_t {
224 const char *name; /**< The name of the register. */
225 const arch_register_class_t *reg_class; /**< The class of the register */
226 unsigned short index; /**< The index of the register in
228 unsigned short global_index; /**< The global index this
229 register in the architecture. */
230 arch_register_type_t type; /**< The type of the register. */
231 /** register constraint allowing just this register */
232 const arch_register_req_t *single_req;
233 /** register number in dwarf debugging format */
234 unsigned short dwarf_number;
238 * A class of registers.
239 * Like general purpose or floating point.
241 struct arch_register_class_t {
242 unsigned index; /**< index of this register class */
243 const char *name; /**< The name of the register class.*/
244 unsigned n_regs; /**< Number of registers in this
246 ir_mode *mode; /**< The mode of the register class.*/
247 const arch_register_t *regs; /**< The array of registers. */
248 arch_register_class_flags_t flags; /**< register class flags. */
249 const arch_register_req_t *class_req;
/** return the number of registers in this register class */
#define arch_register_class_n_regs(cls) ((cls)->n_regs)

/** return the largest mode of this register class */
#define arch_register_class_mode(cls) ((cls)->mode)

/** return the name of this register class */
#define arch_register_class_name(cls) ((cls)->name)

/** return the index of this register class */
#define arch_register_class_index(cls) ((cls)->index)

/** return the register class flags */
#define arch_register_class_flags(cls) ((cls)->flags)
267 static inline const arch_register_t *arch_register_for_index(
268 const arch_register_class_t *cls, unsigned idx)
270 assert(idx < cls->n_regs);
271 return &cls->regs[idx];
/**
 * Convenience macro to check for set constraints.
 * @param req   A pointer to register requirements.
 * @param kind  The kind of constraint to check for
 *              (see arch_register_req_type_t).
 * @return      1, If the kind of constraint is present, 0 if not.
 */
#define arch_register_req_is(req, kind) \
	(((req)->type & (arch_register_req_type_ ## kind)) != 0)
285 * Expresses requirements to register allocation for an operand.
287 struct arch_register_req_t {
288 arch_register_req_type_t type; /**< The type of the constraint. */
289 const arch_register_class_t *cls; /**< The register class this constraint
291 const unsigned *limited; /**< allowed register bitset
292 (in case of wide-values this is
293 only about the first register) */
294 unsigned other_same; /**< Bitmask of ins which should use the
295 same register (should_be_same). */
296 unsigned other_different; /**< Bitmask of ins which shall use a
298 (must_be_different) */
299 unsigned char width; /**< specifies how many sequential
300 registers are required */
303 static inline bool reg_reqs_equal(const arch_register_req_t *req1,
304 const arch_register_req_t *req2)
309 if (req1->type != req2->type ||
310 req1->cls != req2->cls ||
311 req1->other_same != req2->other_same ||
312 req1->other_different != req2->other_different ||
313 (req1->limited != NULL) != (req2->limited != NULL))
316 if (req1->limited != NULL) {
317 size_t const n_regs = arch_register_class_n_regs(req1->cls);
318 if (!rbitsets_equal(req1->limited, req2->limited, n_regs))
325 struct arch_irn_ops_t {
328 * Get the entity on the stack frame this node depends on.
329 * @param irn The node in question.
330 * @return The entity on the stack frame or NULL, if the node does not have
331 * a stack frame entity.
333 ir_entity *(*get_frame_entity)(const ir_node *irn);
336 * Set the offset of a node carrying an entity on the stack frame.
337 * @param irn The node.
338 * @param offset The offset of the node's stack frame entity.
340 void (*set_frame_offset)(ir_node *irn, int offset);
343 * Returns the delta of the stackpointer for nodes that increment or
344 * decrement the stackpointer with a constant value. (push, pop
345 * nodes on most architectures).
346 * A positive value stands for an expanding stack area, a negative value for
349 * @param irn The node
350 * @return 0 if the stackpointer is not modified with a constant
351 * value, otherwise the increment/decrement value
353 int (*get_sp_bias)(const ir_node *irn);
356 * Get the estimated cycle count for @p irn.
358 * @param irn The node.
359 * @return The estimated cycle count for this operation
361 int (*get_op_estimated_cost)(const ir_node *irn);
364 * Asks the backend whether operand @p i of @p irn can be loaded form memory
367 * @param irn The node.
368 * @param i Index of the argument we would like to know whether @p irn
369 * can load it form memory internally
370 * @return nonzero if argument can be loaded or zero otherwise
372 int (*possible_memory_operand)(const ir_node *irn, unsigned int i);
375 * Ask the backend to assimilate @p reload of operand @p i into @p irn.
377 * @param irn The node.
378 * @param spill The spill.
379 * @param i The position of the reload.
381 void (*perform_memory_operand)(ir_node *irn, ir_node *spill,
386 * Architecture interface.
388 struct arch_isa_if_t {
390 * Initializes the isa interface. This is necessary before calling any
391 * other functions from this interface.
396 * Fress resources allocated by this isa interface.
398 void (*finish)(void);
401 * Returns the frontend settings needed for this backend.
403 const backend_params *(*get_params)(void);
406 * lowers current program for target. See the documentation for
407 * be_lower_for_target() for details.
409 void (*lower_for_target)(void);
412 * parse an assembler constraint part and set flags according to its nature
413 * advances the *c pointer to point to the last parsed character (so if you
414 * parse a single character don't advance c)
416 asm_constraint_flags_t (*parse_asm_constraint)(const char **c);
419 * returns true if the string is a valid clobbered (register) in this
422 int (*is_valid_clobber)(const char *clobber);
425 * Start codegeneration
426 * @return a new isa instance
428 arch_env_t *(*begin_codegeneration)(void);
431 * Free the isa instance.
433 void (*end_codegeneration)(void *self);
436 * Initialize the code generator for a graph
439 void (*init_graph)(ir_graph *irg);
442 * Get the ABI restrictions for procedure calls.
443 * @param call_type The call type of the method (procedure) in question.
444 * @param p The array of parameter locations to be filled.
446 void (*get_call_abi)(ir_type *call_type, be_abi_call_t *abi);
449 * mark node as rematerialized
451 void (*mark_remat)(ir_node *node);
454 * return node used as base in pic code addresses
456 ir_node* (*get_pic_base)(ir_graph *irg);
459 * Create a spill instruction. We assume that spill instructions
460 * do not need any additional registers and do not affect cpu-flags in any
462 * Construct a sequence of instructions after @p after (the resulting nodes
463 * are already scheduled).
464 * Returns a mode_M value which is used as input for a reload instruction.
466 ir_node *(*new_spill)(ir_node *value, ir_node *after);
469 * Create a reload instruction. We assume that reload instructions do not
470 * need any additional registers and do not affect cpu-flags in any way.
471 * Constructs a sequence of instruction before @p before (the resulting
472 * nodes are already scheduled). A rewiring of users is not performed in
474 * Returns a value representing the restored value.
476 ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
480 * Checks if the given register is callee/caller saved.
481 * @deprecated, only necessary if backend still uses beabi functions
483 int (*register_saved_by)(const arch_register_t *reg, int callee);
486 * Called directly after initialization. Backend should handle all
489 void (*handle_intrinsics)(void);
492 * Called before abi introduce.
494 void (*before_abi)(ir_graph *irg);
497 * Called, when the graph is being normalized.
499 void (*prepare_graph)(ir_graph *irg);
502 * Called before register allocation.
504 void (*before_ra)(ir_graph *irg);
507 * Called directly before done is called. This should be the last place
508 * where the irg is modified.
510 void (*finish_graph)(ir_graph *irg);
513 * Called after everything happened. This call should emit the final
514 * assembly code but avoid changing the irg.
516 void (*emit)(ir_graph *irg);
#define arch_env_end_codegeneration(env)   ((env)->impl->end_codegeneration(env))
#define arch_env_handle_intrinsics(env)    \
	do { if((env)->impl->handle_intrinsics != NULL) (env)->impl->handle_intrinsics(); } while(0)
#define arch_env_get_call_abi(env,tp,abi)  ((env)->impl->get_call_abi((tp), (abi)))
#define arch_env_mark_remat(env,node) \
	do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((node)); } while(0)

#define arch_env_new_spill(env,value,after)           ((env)->impl->new_spill(value, after))
#define arch_env_new_reload(env,value,spilled,before) ((env)->impl->new_reload(value, spilled, before))
533 const arch_isa_if_t *impl;
534 unsigned n_registers; /**< number of registers */
535 const arch_register_t *registers; /**< register array */
536 unsigned n_register_classes; /**< number of register classes*/
537 const arch_register_class_t *register_classes; /**< register classes */
538 const arch_register_t *sp; /**< The stack pointer register. */
539 const arch_register_t *bp; /**< The base pointer register. */
540 int stack_alignment; /**< power of 2 stack alignment */
541 int spill_cost; /**< cost for a be_Spill node */
542 int reload_cost; /**< cost for a be_Reload node */
543 bool custom_abi : 1; /**< backend does all abi handling
544 and does not need the generic
545 stuff from beabi.h/.c */
548 static inline bool arch_irn_is_ignore(const ir_node *irn)
550 const arch_register_req_t *req = arch_get_irn_register_req(irn);
551 return arch_register_req_is(req, ignore);
554 static inline bool arch_irn_consider_in_reg_alloc(
555 const arch_register_class_t *cls, const ir_node *node)
557 const arch_register_req_t *req = arch_get_irn_register_req(node);
558 return req->cls == cls && !arch_register_req_is(req, ignore);
/**
 * Iterate over all values (Proj outputs for a mode_T node, the node itself
 * otherwise) defined by @p node, executing @p code for each @p value.
 * NOTE(review): the do/while scaffolding was reconstructed from the surviving
 * continuation lines — confirm against upstream bearch.h.
 */
#define be_foreach_value(node, value, code) \
	do { \
		if (get_irn_mode(node) == mode_T) { \
			foreach_out_edge(node, node##__edge) { \
				ir_node *const value = get_edge_src_irn(node##__edge); \
				if (!is_Proj(value)) \
					continue; \
				code \
			} \
		} else { \
			ir_node *const value = node; \
			code \
		} \
	} while (0)
/**
 * Iterate over all values defined by an instruction.
 * Only looks at values in a certain register class where the requirements
 * are not marked as ignore.
 * Executes @p code for each definition.
 */
#define be_foreach_definition_(node, ccls, value, req, code) \
	be_foreach_value(node, value, \
		arch_register_req_t const *const req = arch_get_irn_register_req(value); \
		if (req->cls != ccls) \
			continue; \
		code \
	)

#define be_foreach_definition(node, ccls, value, req, code) \
	be_foreach_definition_(node, ccls, value, req, \
		if (arch_register_req_is(req, ignore)) \
			continue; \
		code \
	)
/**
 * Iterate over all inputs of @p node whose input requirement is in class
 * @p ccls and whose value is not marked "ignore"; executes @p code for each.
 * NOTE(review): the do/while scaffolding was reconstructed from the surviving
 * continuation lines — confirm against upstream bearch.h.
 */
#define be_foreach_use(node, ccls, in_req, value, value_req, code) \
	do { \
		for (int i_ = 0, n_ = get_irn_arity(node); i_ < n_; ++i_) { \
			const arch_register_req_t *in_req = arch_get_irn_register_req_in(node, i_); \
			if (in_req->cls != ccls) \
				continue; \
			ir_node *value = get_irn_n(node, i_); \
			const arch_register_req_t *value_req = arch_get_irn_register_req(value); \
			if (value_req->type & arch_register_req_type_ignore) \
				continue; \
			code \
		} \
	} while (0)
611 static inline const arch_register_class_t *arch_get_irn_reg_class(
614 const arch_register_req_t *req = arch_get_irn_register_req(node);
/**
 * Check if register @p reg satisfies requirement @p req.
 */
bool arch_reg_is_allocatable(const arch_register_req_t *req,
                             const arch_register_t *reg);