/**
 * Processor architecture specification.
 * @author Sebastian Hack
 */
33 /* Initialize the architecture environment struct. */
34 arch_env_t *arch_env_init(arch_env_t *env, const arch_isa_if_t *isa_if, FILE *file_handle, be_main_env_t *main_env)
36 memset(env, 0, sizeof(*env));
37 env->isa = isa_if->init(file_handle);
38 env->constructor_entities = pset_new_ptr(5);
39 env->isa->main_env = main_env;
43 arch_env_t *arch_env_push_irn_handler(arch_env_t *env,
44 const arch_irn_handler_t *handler)
46 assert(env->handlers_tos < ARCH_MAX_HANDLERS);
47 env->handlers[env->handlers_tos++] = handler;
51 const arch_irn_handler_t *arch_env_pop_irn_handler(arch_env_t *env)
53 assert(env->handlers_tos > 0 && env->handlers_tos <= ARCH_MAX_HANDLERS);
54 return env->handlers[--env->handlers_tos];
/* Ops returned by get_irn_ops() when no registered handler claims a node. */
static const arch_irn_ops_t *fallback_irn_ops = NULL;
59 int arch_register_class_put(const arch_register_class_t *cls, bitset_t *bs)
63 for(i = 0, n = cls->n_regs; i < n; ++i)
71 * Get the isa responsible for a node.
72 * @param env The arch environment with the isa stack.
73 * @param irn The node to get the responsible isa for.
74 * @return The irn operations given by the responsible isa.
76 static INLINE const arch_irn_ops_t *
77 get_irn_ops(const arch_env_t *env, const ir_node *irn)
81 for(i = env->handlers_tos - 1; i >= 0; --i) {
82 const arch_irn_handler_t *handler = env->handlers[i];
83 const arch_irn_ops_t *ops = handler->get_irn_ops(handler, irn);
89 return fallback_irn_ops;
92 const arch_irn_ops_t *arch_get_irn_ops(const arch_env_t *env, const ir_node *irn) {
93 return get_irn_ops(env, irn);
96 const arch_register_req_t *arch_get_register_req(const arch_env_t *env,
97 arch_register_req_t *req, const ir_node *irn, int pos)
99 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
100 req->type = arch_register_req_type_none;
101 return ops->impl->get_irn_reg_req(ops, req, irn, pos);
104 void arch_set_frame_offset(const arch_env_t *env, ir_node *irn, int offset)
106 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
107 ops->impl->set_frame_offset(ops, irn, offset);
110 entity *arch_get_frame_entity(const arch_env_t *env, ir_node *irn)
112 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
113 return ops->impl->get_frame_entity(ops, irn);
116 void arch_set_frame_entity(const arch_env_t *env, ir_node *irn, entity *ent)
118 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
119 ops->impl->set_frame_entity(ops, irn, ent);
122 int arch_get_sp_bias(const arch_env_t *env, ir_node *irn)
124 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
125 return ops->impl->get_sp_bias(ops, irn);
128 arch_inverse_t *arch_get_inverse(const arch_env_t *env, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obstack)
130 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
131 if(ops->impl->get_inverse) {
132 return ops->impl->get_inverse(ops, irn, i, inverse, obstack);
138 int arch_possible_memory_operand(const arch_env_t *env, const ir_node *irn, unsigned int i) {
139 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
140 if(ops->impl->possible_memory_operand) {
141 return ops->impl->possible_memory_operand(ops, irn, i);
147 extern void arch_perform_memory_operand(const arch_env_t *env, ir_node *irn, ir_node *spill, unsigned int i) {
148 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
149 if(ops->impl->perform_memory_operand) {
150 ops->impl->perform_memory_operand(ops, irn, spill, i);
156 int arch_get_op_estimated_cost(const arch_env_t *env, const ir_node *irn)
158 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
159 if(ops->impl->get_op_estimated_cost) {
160 return ops->impl->get_op_estimated_cost(ops, irn);
166 int arch_is_possible_memory_operand(const arch_env_t *env, const ir_node *irn, int i)
168 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
169 if(ops->impl->possible_memory_operand) {
170 return ops->impl->possible_memory_operand(ops, irn, i);
176 int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn, int pos, bitset_t *bs)
178 arch_register_req_t local_req;
179 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
180 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
182 if(req->type == arch_register_req_type_none) {
183 bitset_clear_all(bs);
187 if(arch_register_req_is(req, limited)) {
188 req->limited(req->limited_env, bs);
189 return bitset_popcnt(bs);
192 arch_register_class_put(req->cls, bs);
193 return req->cls->n_regs;
196 void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs)
200 for(i = 0; i < cls->n_regs; ++i) {
201 if(!arch_register_type_is(&cls->regs[i], ignore))
206 int arch_count_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls)
211 for(i = 0; i < cls->n_regs; ++i) {
212 if(!arch_register_type_is(&cls->regs[i], ignore))
219 int arch_is_register_operand(const arch_env_t *env,
220 const ir_node *irn, int pos)
222 arch_register_req_t local_req;
223 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
224 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
228 int arch_reg_is_allocatable(const arch_env_t *env, const ir_node *irn,
229 int pos, const arch_register_t *reg)
231 arch_register_req_t req;
233 arch_get_register_req(env, &req, irn, pos);
235 if(req.type == arch_register_req_type_none)
238 if(arch_register_req_is(&req, limited)) {
239 bitset_t *bs = bitset_alloca(req.cls->n_regs);
240 req.limited(req.limited_env, bs);
241 return bitset_is_set(bs, arch_register_get_index(reg));
244 return req.cls == reg->reg_class;
247 const arch_register_class_t *
248 arch_get_irn_reg_class(const arch_env_t *env, const ir_node *irn, int pos)
250 arch_register_req_t local_req;
251 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
252 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
253 return req ? req->cls : NULL;
256 extern const arch_register_t *
257 arch_get_irn_register(const arch_env_t *env, const ir_node *irn)
259 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
260 return ops->impl->get_irn_reg(ops, irn);
263 extern void arch_set_irn_register(const arch_env_t *env,
264 ir_node *irn, const arch_register_t *reg)
266 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
267 ops->impl->set_irn_reg(ops, irn, reg);
270 extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
272 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
273 return ops->impl->classify(ops, irn);
276 extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn)
278 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
279 return ops->impl->get_flags(ops, irn);
282 extern const char *arch_irn_flag_str(arch_irn_flags_t fl)
285 #define XXX(x) case arch_irn_flags_ ## x: return #x;
288 XXX(rematerializable);
296 extern char *arch_register_req_format(char *buf, size_t len, const arch_register_req_t *req)
299 snprintf(buf, len, "class: %s", req->cls->name);
301 if(arch_register_req_is(req, limited)) {
303 bitset_t *bs = bitset_alloca(req->cls->n_regs);
304 req->limited(req->limited_env, bs);
305 strncat(buf, " limited:", len);
306 bitset_foreach(bs, elm) {
307 strncat(buf, " ", len);
308 strncat(buf, req->cls->regs[elm].name, len);
312 if(arch_register_req_is(req, should_be_same)) {
313 ir_snprintf(tmp, sizeof(tmp), " same to: %+F", req->other_different);
314 strncat(buf, tmp, len);
317 if(arch_register_req_is(req, should_be_different)) {
318 ir_snprintf(tmp, sizeof(tmp), " different to: %+F", req->other_different);
319 strncat(buf, tmp, len);
325 int arch_ent_is_constructor(const arch_env_t *arch_env, const entity *ent) {
326 return pset_find_ptr(arch_env->constructor_entities, ent) != NULL;