/**
 * Processor architecture specification.
 * @author Sebastian Hack
 */
32 arch_env_t *arch_env_init(arch_env_t *env, const arch_isa_if_t *isa_if)
34 memset(env, 0, sizeof(*env));
35 env->isa = isa_if->init();
39 arch_env_t *arch_env_push_irn_handler(arch_env_t *env,
40 const arch_irn_handler_t *handler)
42 assert(env->handlers_tos <= ARCH_MAX_HANDLERS);
43 env->handlers[env->handlers_tos++] = handler;
47 const arch_irn_handler_t *arch_env_pop_irn_handler(arch_env_t *env)
49 assert(env->handlers_tos > 0 && env->handlers_tos <= ARCH_MAX_HANDLERS);
50 return env->handlers[--env->handlers_tos];
53 static const arch_irn_ops_t *fallback_irn_ops = NULL;
55 int arch_register_class_put(const arch_register_class_t *cls, bitset_t *bs)
59 for(i = 0, n = cls->n_regs; i < n; ++i)
67 * Get the isa responsible for a node.
68 * @param env The arch environment with the isa stack.
69 * @param irn The node to get the responsible isa for.
70 * @return The irn operations given by the responsible isa.
72 static INLINE const arch_irn_ops_t *
73 get_irn_ops(const arch_env_t *env, const ir_node *irn)
77 for(i = env->handlers_tos - 1; i >= 0; --i) {
78 const arch_irn_handler_t *handler = env->handlers[i];
79 const arch_irn_ops_t *ops = handler->get_irn_ops(handler, irn);
85 return fallback_irn_ops;
88 const arch_register_req_t *arch_get_register_req(const arch_env_t *env,
89 arch_register_req_t *req, const ir_node *irn, int pos)
91 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
92 req->type = arch_register_req_type_none;
93 return ops->impl->get_irn_reg_req(ops, req, irn, pos);
96 void arch_set_frame_offset(const arch_env_t *env, ir_node *irn, int offset)
98 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
99 ops->impl->set_frame_offset(ops, irn, offset);
102 entity *arch_get_frame_entity(const arch_env_t *env, ir_node *irn)
104 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
105 return ops->impl->get_frame_entity(ops, irn);
109 int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn, int pos, bitset_t *bs)
111 arch_register_req_t local_req;
112 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
113 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
115 if(req->type == arch_register_req_type_none) {
116 bitset_clear_all(bs);
120 if(arch_register_req_is(req, limited)) {
121 req->limited(req->limited_env, bs);
122 return bitset_popcnt(bs);
125 arch_register_class_put(req->cls, bs);
126 return req->cls->n_regs;
129 void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs)
133 for(i = 0; i < cls->n_regs; ++i) {
134 if(!arch_register_type_is(&cls->regs[i], ignore))
139 int arch_is_register_operand(const arch_env_t *env,
140 const ir_node *irn, int pos)
142 arch_register_req_t local_req;
143 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
144 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
148 int arch_reg_is_allocatable(const arch_env_t *env, const ir_node *irn,
149 int pos, const arch_register_t *reg)
151 arch_register_req_t req;
153 arch_get_register_req(env, &req, irn, pos);
155 if(req.type == arch_register_req_type_none)
158 if(arch_register_req_is(&req, limited)) {
159 bitset_t *bs = bitset_alloca(req.cls->n_regs);
160 req.limited(req.limited_env, bs);
161 return bitset_is_set(bs, arch_register_get_index(reg));
164 return req.cls == reg->reg_class;
167 const arch_register_class_t *
168 arch_get_irn_reg_class(const arch_env_t *env, const ir_node *irn, int pos)
170 arch_register_req_t local_req;
171 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
172 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
173 return req ? req->cls : NULL;
176 extern const arch_register_t *
177 arch_get_irn_register(const arch_env_t *env, const ir_node *irn)
179 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
180 return ops->impl->get_irn_reg(ops, irn);
183 extern void arch_set_irn_register(const arch_env_t *env,
184 ir_node *irn, const arch_register_t *reg)
186 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
187 ops->impl->set_irn_reg(ops, irn, reg);
190 extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
192 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
193 return ops->impl->classify(ops, irn);
196 extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn)
198 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
199 return ops->impl->get_flags(ops, irn);
202 extern const char *arch_irn_flag_str(arch_irn_flags_t fl)
205 #define XXX(x) case arch_irn_flags_ ## x: return #x;
208 XXX(rematerializable);
215 extern char *arch_register_req_format(char *buf, size_t len, const arch_register_req_t *req)
218 snprintf(buf, len, "class: %s", req->cls->name);
220 if(arch_register_req_is(req, limited)) {
222 bitset_t *bs = bitset_alloca(req->cls->n_regs);
223 req->limited(req->limited_env, bs);
224 strncat(buf, " limited:", len);
225 bitset_foreach(bs, elm) {
226 strncat(buf, " ", len);
227 strncat(buf, req->cls->regs[elm].name, len);
231 if(arch_register_req_is(req, should_be_same)) {
232 ir_snprintf(tmp, sizeof(tmp), " same to: %+F", req->other_different);
233 strncat(buf, tmp, len);
236 if(arch_register_req_is(req, should_be_different)) {
237 ir_snprintf(tmp, sizeof(tmp), " different to: %+F", req->other_different);
238 strncat(buf, tmp, len);