/**
 * Processor architecture specification.
 * @author Sebastian Hack
 */
30 arch_env_t *arch_env_init(arch_env_t *env, const arch_isa_if_t *isa_if)
32 memset(env, 0, sizeof(*env));
33 env->isa = isa_if->init();
37 arch_env_t *arch_env_add_irn_handler(arch_env_t *env,
38 const arch_irn_handler_t *handler)
40 assert(env->handlers_tos <= ARCH_MAX_HANDLERS);
41 env->handlers[env->handlers_tos++] = handler;
45 static const arch_irn_ops_t *fallback_irn_ops = NULL;
47 int arch_register_class_put(const arch_register_class_t *cls, bitset_t *bs)
51 for(i = 0, n = cls->n_regs; i < n; ++i)
59 * Get the isa responsible for a node.
60 * @param env The arch environment with the isa stack.
61 * @param irn The node to get the responsible isa for.
62 * @return The irn operations given by the responsible isa.
64 static INLINE const arch_irn_ops_t *
65 get_irn_ops(const arch_env_t *env, const ir_node *irn)
69 for(i = env->handlers_tos - 1; i >= 0; --i) {
70 const arch_irn_handler_t *handler = env->handlers[i];
71 const arch_irn_ops_t *ops = handler->get_irn_ops(handler, irn);
77 return fallback_irn_ops;
80 const arch_register_req_t *arch_get_register_req(const arch_env_t *env,
81 arch_register_req_t *req, const ir_node *irn, int pos)
83 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
84 req->type = arch_register_req_type_none;
85 return ops->impl->get_irn_reg_req(ops, req, irn, pos);
88 int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn, int pos, bitset_t *bs)
90 arch_register_req_t local_req;
91 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
92 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
94 if(req->type == arch_register_req_type_none) {
99 if(arch_register_req_is(req, limited)) {
100 req->limited(req->limited_env, bs);
101 return bitset_popcnt(bs);
104 arch_register_class_put(req->cls, bs);
105 return req->cls->n_regs;
108 void arch_put_non_ignore_regs(const arch_env_t *env, const arch_register_class_t *cls, bitset_t *bs)
112 for(i = 0; i < cls->n_regs; ++i) {
113 if(!arch_register_type_is(&cls->regs[i], ignore))
118 int arch_is_register_operand(const arch_env_t *env,
119 const ir_node *irn, int pos)
121 arch_register_req_t local_req;
122 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
123 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
127 int arch_reg_is_allocatable(const arch_env_t *env, const ir_node *irn,
128 int pos, const arch_register_t *reg)
130 arch_register_req_t req;
132 arch_get_register_req(env, &req, irn, pos);
134 if(req.type == arch_register_req_type_none)
137 if(arch_register_req_is(&req, limited)) {
138 bitset_t *bs = bitset_alloca(req.cls->n_regs);
139 req.limited(req.limited_env, bs);
140 return bitset_is_set(bs, arch_register_get_index(reg));
143 return req.cls == reg->reg_class;
146 const arch_register_class_t *
147 arch_get_irn_reg_class(const arch_env_t *env, const ir_node *irn, int pos)
149 arch_register_req_t local_req;
150 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
151 const arch_register_req_t *req = ops->impl->get_irn_reg_req(ops, &local_req, irn, pos);
152 return req ? req->cls : NULL;
155 extern const arch_register_t *
156 arch_get_irn_register(const arch_env_t *env, const ir_node *irn)
158 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
159 return ops->impl->get_irn_reg(ops, irn);
162 extern void arch_set_irn_register(const arch_env_t *env,
163 ir_node *irn, const arch_register_t *reg)
165 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
166 ops->impl->set_irn_reg(ops, irn, reg);
169 extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
171 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
172 return ops->impl->classify(ops, irn);
175 extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn)
177 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
178 return ops->impl->get_flags(ops, irn);