2 * Processor architecture specification.
3 * @author Sebastian Hack
22 #include "raw_bitset.h"
26 /* Initialize the architecture environment struct. */
27 arch_env_t *arch_env_init(arch_env_t *env, const arch_isa_if_t *isa_if, FILE *file_handle, be_main_env_t *main_env)
/* Zero the whole struct first so the handler stack (handlers_tos) and all
 * other fields start in a defined state. */
29 memset(env, 0, sizeof(*env));
/* Construct the ISA through the backend-supplied init callback. */
30 env->isa = isa_if->init(file_handle);
/* NOTE(review): the result of isa_if->init() is dereferenced here without
 * a NULL check -- confirm init() cannot fail/return NULL. */
31 env->isa->main_env = main_env;
/* Push an irn handler onto the environment's fixed-size handler stack.
 * Handlers pushed later take precedence in get_irn_ops(), which scans the
 * stack from the top down. */
35 arch_env_t *arch_env_push_irn_handler(arch_env_t *env,
36 const arch_irn_handler_t *handler)
/* Overflow guard only -- this assert disappears in NDEBUG builds. */
38 assert(env->handlers_tos < ARCH_MAX_HANDLERS);
39 env->handlers[env->handlers_tos++] = handler;
/* Pop the most recently pushed irn handler and return it.
 * Counterpart to arch_env_push_irn_handler(). */
43 const arch_irn_handler_t *arch_env_pop_irn_handler(arch_env_t *env)
/* Underflow/consistency guard -- debug builds only. */
45 assert(env->handlers_tos > 0 && env->handlers_tos <= ARCH_MAX_HANDLERS);
46 return env->handlers[--env->handlers_tos];
/* Returned by get_irn_ops() when no registered handler claims a node.
 * NOTE(review): this is NULL, and most callers dereference the result of
 * get_irn_ops() unconditionally -- an unclaimed node would crash. */
49 static const arch_irn_ops_t *fallback_irn_ops = NULL;
/* Put the registers of a register class into the given bitset.
 * Iterates over all n_regs registers of the class; the loop body is not
 * visible in this listing -- presumably it sets one bit per register
 * (TODO confirm against the full source). Returns an int, likely the
 * register count -- verify. */
51 int arch_register_class_put(const arch_register_class_t *cls, bitset_t *bs)
55 for(i = 0, n = cls->n_regs; i < n; ++i)
63 * Get the isa responsible for a node.
64 * @param env The arch environment with the isa stack.
65 * @param irn The node to get the responsible isa for.
66 * @return The irn operations given by the responsible isa.
68 static INLINE const arch_irn_ops_t *
69 get_irn_ops(const arch_env_t *env, const ir_node *irn)
/* Scan the handler stack from the most recently pushed handler downwards,
 * asking each handler for ops for this node. The accept/return step inside
 * the loop is not visible in this listing -- presumably the first handler
 * returning non-NULL ops wins (TODO confirm). */
73 for(i = env->handlers_tos - 1; i >= 0; --i) {
74 const arch_irn_handler_t *handler = env->handlers[i];
75 const arch_irn_ops_t *ops = handler->get_irn_ops(handler, irn);
/* No handler claimed the node: fall back to fallback_irn_ops, which is
 * NULL -- callers below dereference the result without checking. */
81 return fallback_irn_ops;
/* Public wrapper around the static handler-stack lookup get_irn_ops(). */
84 const arch_irn_ops_t *arch_get_irn_ops(const arch_env_t *env, const ir_node *irn) {
85 return get_irn_ops(env, irn);
/* Get the register requirement for operand position pos of irn, by
 * dispatching to the responsible handler's get_irn_reg_req callback. */
88 const arch_register_req_t *arch_get_register_req(const arch_env_t *env,
89 const ir_node *irn, int pos)
91 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
92 return ops->impl->get_irn_reg_req(ops, irn, pos);
/* Set the stack-frame offset of a node via the responsible handler's
 * set_frame_offset callback (mandatory -- called without a NULL check). */
95 void arch_set_frame_offset(const arch_env_t *env, ir_node *irn, int offset)
97 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
98 ops->impl->set_frame_offset(ops, irn, offset);
/* Get the frame entity a node refers to, via the handler's
 * get_frame_entity callback. */
101 ir_entity *arch_get_frame_entity(const arch_env_t *env, ir_node *irn)
103 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
104 return ops->impl->get_frame_entity(ops, irn);
/* Set the frame entity of a node, via the handler's set_frame_entity
 * callback. */
107 void arch_set_frame_entity(const arch_env_t *env, ir_node *irn, ir_entity *ent)
109 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
110 ops->impl->set_frame_entity(ops, irn, ent);
/* Get the stack-pointer bias a node introduces, via the handler's
 * get_sp_bias callback. */
113 int arch_get_sp_bias(const arch_env_t *env, ir_node *irn)
115 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
116 return ops->impl->get_sp_bias(ops, irn);
/* Compute the inverse operation of operand i of irn, allocating on the
 * given obstack. get_inverse is an optional backend callback; the branch
 * for an absent callback is not visible in this listing -- presumably it
 * returns NULL (TODO confirm). */
119 arch_inverse_t *arch_get_inverse(const arch_env_t *env, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obstack)
121 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
123 if(ops->impl->get_inverse) {
124 return ops->impl->get_inverse(ops, irn, i, inverse, obstack);
/* Ask the backend whether operand i of irn can be turned into a memory
 * operand. possible_memory_operand is optional; the fall-through result
 * when it is absent is not visible here -- presumably 0 (TODO confirm).
 * See also arch_is_possible_memory_operand() below, which duplicates this
 * with a signed index. */
130 int arch_possible_memory_operand(const arch_env_t *env, const ir_node *irn, unsigned int i) {
131 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
133 if(ops->impl->possible_memory_operand) {
134 return ops->impl->possible_memory_operand(ops, irn, i);
/* Rewrite operand i of irn to use the given spill as a memory operand.
 * perform_memory_operand is optional; if the backend does not provide it,
 * this is a no-op. */
140 void arch_perform_memory_operand(const arch_env_t *env, ir_node *irn, ir_node *spill, unsigned int i) {
141 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
143 if(ops->impl->perform_memory_operand) {
144 ops->impl->perform_memory_operand(ops, irn, spill, i);
/* Get the backend's estimated execution cost of irn.
 * get_op_estimated_cost is optional; the default returned when it is
 * absent is not visible in this listing (TODO confirm against full
 * source). */
150 int arch_get_op_estimated_cost(const arch_env_t *env, const ir_node *irn)
152 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
154 if(ops->impl->get_op_estimated_cost) {
155 return ops->impl->get_op_estimated_cost(ops, irn);
/* NOTE(review): near-duplicate of arch_possible_memory_operand() above,
 * differing only in the signedness of the operand index (int vs
 * unsigned int) -- consider consolidating. Same optional-callback
 * pattern; fall-through result not visible here. */
161 int arch_is_possible_memory_operand(const arch_env_t *env, const ir_node *irn, int i)
163 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
165 if(ops->impl->possible_memory_operand) {
166 return ops->impl->possible_memory_operand(ops, irn, i);
/* Fill bs with the registers allocatable for operand pos of irn and
 * return their number. Three cases:
 *   1. no requirement      -> clear bs (return value for this case is
 *                             not visible in this listing; presumably 0)
 *   2. limited requirement -> copy the raw limited bitset into bs and
 *                             return the popcount
 *   3. plain class         -> put the whole register class into bs and
 *                             return its size */
172 int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn, int pos, bitset_t *bs)
174 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
175 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, irn, pos);
177 if(req->type == arch_register_req_type_none) {
178 bitset_clear_all(bs);
182 if(arch_register_req_is(req, limited)) {
/* req->limited is a raw bitset; convert it into the caller's bitset_t. */
183 rbitset_copy_to_bitset(req->limited, bs);
184 return bitset_popcnt(bs);
187 arch_register_class_put(req->cls, bs);
188 return req->cls->n_regs;
/* Put all registers of cls that do NOT carry the ignore flag into bs.
 * The loop body's bit-setting statement is not visible in this listing --
 * presumably bitset_set(bs, i) (TODO confirm). env is unused here apart
 * from the signature. */
191 void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs)
195 for(i = 0; i < cls->n_regs; ++i) {
196 if(!arch_register_type_is(&cls->regs[i], ignore))
/* Count the registers of cls that do NOT carry the ignore flag.
 * The counter increment and return are not visible in this listing --
 * presumably a local counter bumped inside the if (TODO confirm). */
201 int arch_count_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls)
206 for(i = 0; i < cls->n_regs; ++i) {
207 if(!arch_register_type_is(&cls->regs[i], ignore))
/* Tell whether operand pos of irn is a register operand, judged from its
 * register requirement. The return expression is not visible in this
 * listing -- presumably derived from req (e.g. req != NULL or a type
 * check); TODO confirm against the full source. */
214 int arch_is_register_operand(const arch_env_t *env,
215 const ir_node *irn, int pos)
217 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
218 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, irn, pos);
/* Tell whether reg may be assigned to operand pos of irn.
 *   - no requirement      -> result line not visible here (presumably 0)
 *   - limited requirement -> reg is allocatable iff its index is set in
 *                            the limited raw bitset
 *   - otherwise           -> allocatable iff reg belongs to the required
 *                            class */
223 int arch_reg_is_allocatable(const arch_env_t *env, const ir_node *irn,
224 int pos, const arch_register_t *reg)
226 const arch_register_req_t *req;
228 req = arch_get_register_req(env, irn, pos);
230 if(req->type == arch_register_req_type_none)
233 if(arch_register_req_is(req, limited)) {
/* A limited requirement only makes sense within its own class. */
234 assert(arch_register_get_class(reg) == req->cls);
235 return rbitset_is_set(req->limited, arch_register_get_index(reg));
238 return req->cls == reg->reg_class;
/* Get the register class required for operand pos of irn.
 * The return statement is not visible in this listing -- presumably
 * returns req->cls (TODO confirm). */
241 const arch_register_class_t *
242 arch_get_irn_reg_class(const arch_env_t *env, const ir_node *irn, int pos)
244 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
245 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, irn, pos);
/* Invariant: a "none" requirement must carry a NULL class. */
247 assert(req->type != arch_register_req_type_none || req->cls == NULL);
/* Get the register currently assigned to irn, via the handler's
 * get_irn_reg callback. */
252 extern const arch_register_t *
253 arch_get_irn_register(const arch_env_t *env, const ir_node *irn)
255 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
256 return ops->impl->get_irn_reg(ops, irn);
/* Assign a register to irn, via the handler's set_irn_reg callback. */
259 extern void arch_set_irn_register(const arch_env_t *env,
260 ir_node *irn, const arch_register_t *reg)
262 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
263 ops->impl->set_irn_reg(ops, irn, reg);
/* Classify irn (arch_irn_class_t) via the handler's classify callback. */
266 extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
268 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
269 return ops->impl->classify(ops, irn);
/* Get the backend flags of irn via the handler's get_flags callback. */
272 extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn)
274 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
275 return ops->impl->get_flags(ops, irn);
/* Map a single arch_irn_flags_t value to its name as a string.
 * Implemented as a switch (not fully visible in this listing) whose cases
 * are generated by the XXX macro below; only the "rematerializable" case
 * is visible here. The default/fallback return is not visible -- TODO
 * confirm against the full source. */
278 extern const char *arch_irn_flag_str(arch_irn_flags_t fl)
/* Expands to: case arch_irn_flags_<x>: return "<x>"; */
281 #define XXX(x) case arch_irn_flags_ ## x: return #x;
284 XXX(rematerializable);
/* Format a human-readable description of a register requirement into buf
 * (class name, limited register set, same-to / different-to relations).
 * Returns buf.
 *
 * NOTE(review): every strncat() call below passes `len` -- the TOTAL
 * buffer size -- as the size argument. strncat's size argument must be
 * the REMAINING space (len - strlen(buf) - 1), so these calls can
 * overflow buf once it fills up (CERT STR31-C). Consider switching to
 * snprintf with an advancing write position. */
292 extern char *arch_register_req_format(char *buf, size_t len,
293 const arch_register_req_t *req,
/* Start with the class name; snprintf bounds and terminates this part. */
297 snprintf(buf, len, "class: %s", req->cls->name);
299 if(arch_register_req_is(req, limited)) {
300 unsigned n_regs = req->cls->n_regs;
303 strncat(buf, " limited:", len);
/* List every register whose bit is set in the limited raw bitset. */
304 for(i = 0; i < n_regs; ++i) {
305 if(rbitset_is_set(req->limited, i)) {
306 const arch_register_t *reg = &req->cls->regs[i];
307 strncat(buf, " ", len);
308 strncat(buf, reg->name, len);
/* other_same / other_different are operand indices of the node; the
 * related node is fetched and printed with ir_snprintf's %+F. The tmp
 * buffer's declaration is not visible in this listing. */
313 if(arch_register_req_is(req, should_be_same)) {
314 const ir_node *same = get_irn_n(node, req->other_same);
315 ir_snprintf(tmp, sizeof(tmp), " same to: %+F", same);
316 strncat(buf, tmp, len);
319 if(arch_register_req_is(req, should_be_different)) {
320 const ir_node *different = get_irn_n(node, req->other_different);
321 ir_snprintf(tmp, sizeof(tmp), " different to: %+F", different);
322 strncat(buf, tmp, len);
/* Shared, immutable "no requirement" singleton; exported through the
 * arch_no_register_req pointer so callers can compare against it instead
 * of building their own. Remaining initializer fields are not visible in
 * this listing (presumably zero/NULL). */
328 static const arch_register_req_t no_requirement = {
329 arch_register_req_type_none,
335 const arch_register_req_t *arch_no_register_req = &no_requirement;