/**
 * Processor architecture specification.
 * @author Sebastian Hack
 */
29 arch_env_t *arch_env_init(arch_env_t *env, const arch_isa_if_t *isa_if, FILE *file_handle)
31 memset(env, 0, sizeof(*env));
32 env->isa = isa_if->init(file_handle);
36 arch_env_t *arch_env_add_irn_handler(arch_env_t *env,
37 const arch_irn_handler_t *handler)
39 assert(env->handlers_tos <= ARCH_MAX_HANDLERS);
40 env->handlers[env->handlers_tos++] = handler;
44 static const arch_irn_ops_t *fallback_irn_ops = NULL;
46 int arch_register_class_put(const arch_register_class_t *cls, bitset_t *bs)
50 for(i = 0, n = cls->n_regs; i < n; ++i)
58 * Get the isa responsible for a node.
59 * @param env The arch environment with the isa stack.
60 * @param irn The node to get the responsible isa for.
61 * @return The irn operations given by the responsible isa.
63 static INLINE const arch_irn_ops_t *
64 get_irn_ops(const arch_env_t *env, const ir_node *irn)
68 for(i = env->handlers_tos - 1; i >= 0; --i) {
69 const arch_irn_handler_t *handler = env->handlers[i];
70 const arch_irn_ops_t *ops = handler->get_irn_ops(handler, irn);
76 return fallback_irn_ops;
79 const arch_register_req_t *arch_get_register_req(const arch_env_t *env,
80 arch_register_req_t *req, const ir_node *irn, int pos)
82 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
83 return ops->get_irn_reg_req(ops, req, irn, pos);
86 int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn,
87 int pos, const arch_register_class_t *cls, bitset_t *bs)
89 arch_register_req_t local_req;
90 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
91 const arch_register_req_t *req = ops->get_irn_reg_req(ops, &local_req, irn, pos);
94 case arch_register_req_type_normal:
95 arch_register_class_put(req->cls, bs);
96 return req->cls->n_regs;
98 case arch_register_req_type_limited:
99 return req->data.limited(irn, pos, bs);
102 assert(0 && "This register requirement case is not covered");
108 int arch_is_register_operand(const arch_env_t *env,
109 const ir_node *irn, int pos)
111 arch_register_req_t local_req;
112 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
113 const arch_register_req_t *req = ops->get_irn_reg_req(ops, &local_req, irn, pos);
117 int arch_reg_is_allocatable(const arch_env_t *env, const ir_node *irn,
118 int pos, const arch_register_t *reg)
121 arch_register_req_t req;
123 arch_get_register_req(env, &req, irn, pos);
125 case arch_register_req_type_normal:
126 res = req.cls == reg->reg_class;
128 case arch_register_req_type_limited:
130 bitset_t *bs = bitset_alloca(req.cls->n_regs);
131 req.data.limited(irn, pos, bs);
132 res = bitset_is_set(bs, arch_register_get_index(reg));
141 const arch_register_class_t *
142 arch_get_irn_reg_class(const arch_env_t *env, const ir_node *irn, int pos)
144 arch_register_req_t local_req;
145 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
146 const arch_register_req_t *req = ops->get_irn_reg_req(ops, &local_req, irn, pos);
147 return req ? req->cls : NULL;
150 extern const arch_register_t *
151 arch_get_irn_register(const arch_env_t *env, const ir_node *irn)
153 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
154 return ops->get_irn_reg(ops, irn);
157 extern void arch_set_irn_register(const arch_env_t *env,
158 ir_node *irn, const arch_register_t *reg)
160 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
161 ops->set_irn_reg(ops, irn, reg);
164 extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
166 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
167 return ops->classify(ops, irn);
170 extern arch_irn_flags_t arch_irn_get_flags(const arch_env_t *env, const ir_node *irn)
172 const arch_irn_ops_t *ops = get_irn_ops(env, irn);
173 return ops->get_flags(ops, irn);