 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       This file implements the common parts of IR transformation from
 *              firm into ia32-Firm.
 * @author      Matthias Braun, Sebastian Buchwald
 * @version     $Id: ia32_common_transform.c 21012 2008-08-06 13:35:17Z beck $
 */
35 #include "../betranshlp.h"
39 #include "ia32_architecture.h"
40 #include "ia32_common_transform.h"
41 #include "ia32_new_nodes.h"
43 #include "gen_ia32_new_nodes.h"
44 #include "gen_ia32_regalloc_if.h"
46 /** hold the current code generator during transformation */
47 ia32_code_gen_t *env_cg = NULL;
49 heights_t *heights = NULL;
51 static const arch_register_req_t no_register_req = {
52 arch_register_req_type_none,
54 NULL, /* limit bitset */
59 static int check_immediate_constraint(long val, char immediate_constraint_type)
61 switch (immediate_constraint_type) {
65 case 'I': return 0 <= val && val <= 31;
66 case 'J': return 0 <= val && val <= 63;
67 case 'K': return -128 <= val && val <= 127;
68 case 'L': return val == 0xff || val == 0xffff;
69 case 'M': return 0 <= val && val <= 3;
70 case 'N': return 0 <= val && val <= 255;
71 case 'O': return 0 <= val && val <= 127;
73 default: panic("Invalid immediate constraint found");
78 * Get a primitive type for a mode with alignment 16.
80 static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode)
82 ir_type *res = pmap_get(types, mode);
86 res = new_type_primitive(mode);
87 if (get_mode_size_bits(mode) >= 80) {
88 set_type_alignment_bytes(res, 16);
90 pmap_insert(types, mode, res);
94 ir_entity *create_float_const_entity(ir_node *cnst)
96 ia32_isa_t *isa = env_cg->isa;
97 tarval *tv = get_Const_tarval(cnst);
98 ir_entity *res = pmap_get(isa->tv_ent, tv);
99 ir_initializer_t *initializer;
106 mode = get_tarval_mode(tv);
108 if (! ia32_cg_config.use_sse2) {
109 /* try to reduce the mode to produce smaller sized entities */
110 if (mode != mode_F) {
111 if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
113 tv = tarval_convert_to(tv, mode);
114 } else if (mode != mode_D) {
115 if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
117 tv = tarval_convert_to(tv, mode);
123 tp = ia32_get_prim_type(isa->types, mode);
124 res = new_entity(get_glob_type(), id_unique("C%u"), tp);
125 set_entity_ld_ident(res, get_entity_ident(res));
126 set_entity_visibility(res, ir_visibility_private);
127 add_entity_linkage(res, IR_LINKAGE_CONSTANT);
129 initializer = create_initializer_tarval(tv);
130 set_entity_initializer(res, initializer);
132 pmap_insert(isa->tv_ent, tv, res);
136 ir_node *ia32_create_Immediate(ir_entity *symconst, int symconst_sign, long val)
138 ir_graph *irg = current_ir_graph;
139 ir_node *start_block = get_irg_start_block(irg);
140 ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
141 symconst_sign, no_pic_adjust, val);
142 arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
147 const arch_register_t *ia32_get_clobber_register(const char *clobber)
149 const arch_register_t *reg = NULL;
152 const arch_register_class_t *cls;
154 /* TODO: construct a hashmap instead of doing linear search for clobber
156 for (c = 0; c < N_CLASSES; ++c) {
157 cls = & ia32_reg_classes[c];
158 for (r = 0; r < cls->n_regs; ++r) {
159 const arch_register_t *temp_reg = arch_register_for_index(cls, r);
160 if (strcmp(temp_reg->name, clobber) == 0
161 || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
173 int ia32_mode_needs_gp_reg(ir_mode *mode)
175 if (mode == mode_fpcw)
177 if (get_mode_size_bits(mode) > 32)
179 return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
182 static void parse_asm_constraints(constraint_t *constraint, const char *c,
185 char immediate_type = '\0';
186 unsigned limited = 0;
187 const arch_register_class_t *cls = NULL;
188 int memory_possible = 0;
189 int all_registers_allowed = 0;
193 memset(constraint, 0, sizeof(constraint[0]));
194 constraint->same_as = -1;
197 /* a memory constraint: no need to do anything in backend about it
198 * (the dependencies are already respected by the memory edge of
203 /* TODO: improve error messages with node and source info. (As users can
204 * easily hit these) */
212 /* Skip out/in-out marker */
222 while (*c != 0 && *c != ',')
227 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
228 cls = &ia32_reg_classes[CLASS_ia32_gp];
229 limited |= 1 << REG_EAX;
232 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
233 cls = &ia32_reg_classes[CLASS_ia32_gp];
234 limited |= 1 << REG_EBX;
237 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
238 cls = &ia32_reg_classes[CLASS_ia32_gp];
239 limited |= 1 << REG_ECX;
242 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
243 cls = &ia32_reg_classes[CLASS_ia32_gp];
244 limited |= 1 << REG_EDX;
247 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
248 cls = &ia32_reg_classes[CLASS_ia32_gp];
249 limited |= 1 << REG_EDI;
252 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
253 cls = &ia32_reg_classes[CLASS_ia32_gp];
254 limited |= 1 << REG_ESI;
258 /* q means lower part of the regs only, this makes no
259 * difference to Q for us (we only assign whole registers) */
260 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
261 cls = &ia32_reg_classes[CLASS_ia32_gp];
262 limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
266 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
267 cls = &ia32_reg_classes[CLASS_ia32_gp];
268 limited |= 1 << REG_EAX | 1 << REG_EDX;
271 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
272 cls = &ia32_reg_classes[CLASS_ia32_gp];
273 limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
274 1 << REG_EDX | 1 << REG_ESI | 1 << REG_EDI |
281 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
282 panic("multiple register classes not supported");
283 cls = &ia32_reg_classes[CLASS_ia32_gp];
284 all_registers_allowed = 1;
290 /* TODO: mark values so the x87 simulator knows about t and u */
291 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_vfp])
292 panic("multiple register classes not supported");
293 cls = &ia32_reg_classes[CLASS_ia32_vfp];
294 all_registers_allowed = 1;
299 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_xmm])
300 panic("multiple register classes not supproted");
301 cls = &ia32_reg_classes[CLASS_ia32_xmm];
302 all_registers_allowed = 1;
312 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
313 panic("multiple register classes not supported");
314 if (immediate_type != '\0')
315 panic("multiple immediate types not supported");
316 cls = &ia32_reg_classes[CLASS_ia32_gp];
321 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
322 panic("multiple register classes not supported");
323 if (immediate_type != '\0')
324 panic("multiple immediate types not supported");
325 cls = &ia32_reg_classes[CLASS_ia32_gp];
326 immediate_type = 'i';
331 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
332 panic("multiple register classes not supported");
333 if (immediate_type != '\0')
334 panic("multiple immediate types not supported");
335 immediate_type = 'i';
336 cls = &ia32_reg_classes[CLASS_ia32_gp];
337 all_registers_allowed = 1;
352 panic("can only specify same constraint on input");
354 sscanf(c, "%d%n", &same_as, &p);
364 /* memory constraint no need to do anything in backend about it
365 * (the dependencies are already respected by the memory edge of
370 case 'E': /* no float consts yet */
371 case 'F': /* no float consts yet */
372 case 's': /* makes no sense on x86 */
373 case '<': /* no autodecrement on x86 */
374 case '>': /* no autoincrement on x86 */
375 case 'C': /* sse constant not supported yet */
376 case 'G': /* 80387 constant not supported yet */
377 case 'y': /* we don't support mmx registers yet */
378 case 'Z': /* not available in 32 bit mode */
379 case 'e': /* not available in 32 bit mode */
380 panic("unsupported asm constraint '%c' found in (%+F)",
381 *c, current_ir_graph);
384 panic("unknown asm constraint '%c' found in (%+F)", *c,
393 panic("same as and register constraint not supported");
394 if (immediate_type != '\0')
395 panic("same as and immediate constraint not supported");
398 if (cls == NULL && same_as < 0) {
399 if (!memory_possible)
400 panic("no constraint specified for assembler input");
403 constraint->same_as = same_as;
404 constraint->cls = cls;
405 constraint->allowed_registers = limited;
406 constraint->all_registers_allowed = all_registers_allowed;
407 constraint->memory_possible = memory_possible;
408 constraint->immediate_type = immediate_type;
411 static bool can_match(const arch_register_req_t *in,
412 const arch_register_req_t *out)
414 if (in->cls != out->cls)
416 if ( (in->type & arch_register_req_type_limited) == 0
417 || (out->type & arch_register_req_type_limited) == 0 )
420 return (*in->limited & *out->limited) != 0;
423 static inline ir_node *get_new_node(ir_node *node)
426 if (be_transformer == TRANSFORMER_DEFAULT) {
427 return be_transform_node(node);
432 return be_transform_node(node);
436 ir_node *gen_ASM(ir_node *node)
438 ir_node *block = get_nodes_block(node);
439 ir_node *new_block = get_new_node(block);
440 dbg_info *dbgi = get_irn_dbg_info(node);
447 int n_out_constraints;
449 const arch_register_req_t **out_reg_reqs;
450 const arch_register_req_t **in_reg_reqs;
451 ia32_asm_reg_t *register_map;
452 unsigned reg_map_size = 0;
453 struct obstack *obst;
454 const ir_asm_constraint *in_constraints;
455 const ir_asm_constraint *out_constraints;
457 int clobbers_flags = 0;
458 unsigned clobber_bits[N_CLASSES];
460 backend_info_t *info;
462 memset(&clobber_bits, 0, sizeof(clobber_bits));
464 /* workaround for lots of buggy code out there as most people think volatile
465 * asm is enough for everything and forget the flags (linux kernel, etc.)
467 if (get_irn_pinned(node) == op_pin_state_pinned) {
471 arity = get_irn_arity(node);
472 in = ALLOCANZ(ir_node*, arity);
474 clobbers = get_ASM_clobbers(node);
476 for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
477 const arch_register_req_t *req;
478 const char *c = get_id_str(clobbers[i]);
480 if (strcmp(c, "memory") == 0)
482 if (strcmp(c, "cc") == 0) {
487 req = parse_clobber(c);
488 clobber_bits[req->cls->index] |= *req->limited;
492 n_out_constraints = get_ASM_n_output_constraints(node);
493 out_arity = n_out_constraints + n_clobbers;
495 in_constraints = get_ASM_input_constraints(node);
496 out_constraints = get_ASM_output_constraints(node);
498 /* determine size of register_map */
499 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
500 const ir_asm_constraint *constraint = &out_constraints[out_idx];
501 if (constraint->pos > reg_map_size)
502 reg_map_size = constraint->pos;
504 for (i = 0; i < arity; ++i) {
505 const ir_asm_constraint *constraint = &in_constraints[i];
506 if (constraint->pos > reg_map_size)
507 reg_map_size = constraint->pos;
511 obst = get_irg_obstack(current_ir_graph);
512 register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size);
513 memset(register_map, 0, reg_map_size * sizeof(register_map[0]));
515 /* construct output constraints */
516 out_size = out_arity + 1;
517 out_reg_reqs = obstack_alloc(obst, out_size * sizeof(out_reg_reqs[0]));
519 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
520 const ir_asm_constraint *constraint = &out_constraints[out_idx];
521 const char *c = get_id_str(constraint->constraint);
522 unsigned pos = constraint->pos;
523 constraint_t parsed_constraint;
524 const arch_register_req_t *req;
526 parse_asm_constraints(&parsed_constraint, c, 1);
527 req = make_register_req(&parsed_constraint, n_out_constraints,
528 out_reg_reqs, out_idx);
529 out_reg_reqs[out_idx] = req;
531 register_map[pos].use_input = 0;
532 register_map[pos].valid = 1;
533 register_map[pos].memory = 0;
534 register_map[pos].inout_pos = out_idx;
535 register_map[pos].mode = constraint->mode;
538 /* inputs + input constraints */
539 in_reg_reqs = obstack_alloc(obst, arity * sizeof(in_reg_reqs[0]));
540 for (i = 0; i < arity; ++i) {
541 ir_node *pred = get_irn_n(node, i);
542 const ir_asm_constraint *constraint = &in_constraints[i];
543 ident *constr_id = constraint->constraint;
544 const char *c = get_id_str(constr_id);
545 unsigned pos = constraint->pos;
546 int is_memory_op = 0;
547 ir_node *input = NULL;
548 unsigned r_clobber_bits;
549 constraint_t parsed_constraint;
550 const arch_register_req_t *req;
552 parse_asm_constraints(&parsed_constraint, c, 0);
553 if (parsed_constraint.cls != NULL) {
554 r_clobber_bits = clobber_bits[parsed_constraint.cls->index];
555 if (r_clobber_bits != 0) {
556 if (parsed_constraint.all_registers_allowed) {
557 parsed_constraint.all_registers_allowed = 0;
558 be_abi_set_non_ignore_regs(be_get_irg_abi(env_cg->irg),
559 parsed_constraint.cls,
560 &parsed_constraint.allowed_registers);
562 parsed_constraint.allowed_registers &= ~r_clobber_bits;
566 req = make_register_req(&parsed_constraint, n_out_constraints,
568 in_reg_reqs[i] = req;
570 if (parsed_constraint.immediate_type != '\0') {
571 char imm_type = parsed_constraint.immediate_type;
572 input = try_create_Immediate(pred, imm_type);
576 ir_node *pred = get_irn_n(node, i);
577 input = get_new_node(pred);
579 if (parsed_constraint.cls == NULL
580 && parsed_constraint.same_as < 0) {
582 } else if (parsed_constraint.memory_possible) {
583 /* TODO: match Load or Load/Store if memory possible is set */
588 register_map[pos].use_input = 1;
589 register_map[pos].valid = 1;
590 register_map[pos].memory = is_memory_op;
591 register_map[pos].inout_pos = i;
592 register_map[pos].mode = constraint->mode;
596 for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
597 const char *c = get_id_str(clobbers[i]);
598 const arch_register_req_t *req;
600 if (strcmp(c, "memory") == 0 || strcmp(c, "cc") == 0)
603 req = parse_clobber(c);
604 out_reg_reqs[out_idx] = req;
608 /* count inputs which are real values (and not memory) */
610 for (i = 0; i < arity; ++i) {
611 ir_node *in = get_irn_n(node, i);
612 if (get_irn_mode(in) == mode_M)
617 /* Attempt to make ASM node register pressure faithful.
618 * (This does not work for complicated cases yet!)
620 * Algorithm: Check if there are fewer inputs or outputs (I will call this
621 * the smaller list). Then try to match each constraint of the smaller list
622 * to 1 of the other list. If we can't match it, then we have to add a dummy
623 * input/output to the other list
625 * FIXME: This is still broken in lots of cases. But at least better than
627 * FIXME: need to do this per register class...
629 if (out_arity <= value_arity) {
630 int orig_arity = arity;
633 bitset_t *used_ins = bitset_alloca(arity);
634 for (o = 0; o < out_arity; ++o) {
636 const arch_register_req_t *outreq = out_reg_reqs[o];
638 if (outreq->cls == NULL) {
642 for (i = 0; i < orig_arity; ++i) {
643 const arch_register_req_t *inreq;
644 if (bitset_is_set(used_ins, i))
646 inreq = in_reg_reqs[i];
647 if (!can_match(outreq, inreq))
649 bitset_set(used_ins, i);
652 /* did we find any match? */
656 /* we might need more space in the input arrays */
657 if (arity >= in_size) {
658 const arch_register_req_t **new_in_reg_reqs;
663 = obstack_alloc(obst, in_size*sizeof(in_reg_reqs[0]));
664 memcpy(new_in_reg_reqs, in_reg_reqs, arity * sizeof(new_in_reg_reqs[0]));
665 new_in = ALLOCANZ(ir_node*, in_size);
666 memcpy(new_in, in, arity*sizeof(new_in[0]));
668 in_reg_reqs = new_in_reg_reqs;
672 /* add a new (dummy) input which occupies the register */
673 assert(outreq->type & arch_register_req_type_limited);
674 in_reg_reqs[arity] = outreq;
675 in[arity] = new_bd_ia32_ProduceVal(NULL, block);
676 be_dep_on_frame(in[arity]);
681 bitset_t *used_outs = bitset_alloca(out_arity);
682 int orig_out_arity = out_arity;
683 for (i = 0; i < arity; ++i) {
685 const arch_register_req_t *inreq = in_reg_reqs[i];
687 if (inreq->cls == NULL) {
691 for (o = 0; o < orig_out_arity; ++o) {
692 const arch_register_req_t *outreq;
693 if (bitset_is_set(used_outs, o))
695 outreq = out_reg_reqs[o];
696 if (!can_match(outreq, inreq))
698 bitset_set(used_outs, i);
701 /* did we find any match? */
702 if (o < orig_out_arity)
705 /* we might need more space in the output arrays */
706 if (out_arity >= out_size) {
707 const arch_register_req_t **new_out_reg_reqs;
711 = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
712 memcpy(new_out_reg_reqs, out_reg_reqs,
713 out_arity * sizeof(new_out_reg_reqs[0]));
714 out_reg_reqs = new_out_reg_reqs;
717 /* add a new (dummy) output which occupies the register */
718 assert(inreq->type & arch_register_req_type_limited);
719 out_reg_reqs[out_arity] = inreq;
724 /* append none register requirement for the memory output */
725 if (out_arity + 1 >= out_size) {
726 const arch_register_req_t **new_out_reg_reqs;
728 out_size = out_arity + 1;
730 = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
731 memcpy(new_out_reg_reqs, out_reg_reqs,
732 out_arity * sizeof(new_out_reg_reqs[0]));
733 out_reg_reqs = new_out_reg_reqs;
736 /* add a new (dummy) output which occupies the register */
737 out_reg_reqs[out_arity] = arch_no_register_req;
740 new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
741 get_ASM_text(node), register_map);
744 be_dep_on_frame(new_node);
746 info = be_get_info(new_node);
747 for (i = 0; i < out_arity; ++i) {
748 info->out_infos[i].req = out_reg_reqs[i];
750 set_ia32_in_req_all(new_node, in_reg_reqs);
752 SET_IA32_ORIG_NODE(new_node, node);
757 ir_node *gen_CopyB(ir_node *node)
759 ir_node *block = get_new_node(get_nodes_block(node));
760 ir_node *src = get_CopyB_src(node);
761 ir_node *new_src = get_new_node(src);
762 ir_node *dst = get_CopyB_dst(node);
763 ir_node *new_dst = get_new_node(dst);
764 ir_node *mem = get_CopyB_mem(node);
765 ir_node *new_mem = get_new_node(mem);
767 dbg_info *dbgi = get_irn_dbg_info(node);
768 int size = get_type_size_bytes(get_CopyB_type(node));
771 /* If we have to copy more than 32 bytes, we use REP MOVSx and */
772 /* then we need the size explicitly in ECX. */
773 if (size >= 32 * 4) {
774 rem = size & 0x3; /* size % 4 */
777 res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size);
778 be_dep_on_frame(res);
780 res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
783 ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
786 res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
789 SET_IA32_ORIG_NODE(res, node);
794 ir_node *gen_Proj_tls(ir_node *node)
796 ir_node *block = get_new_node(get_nodes_block(node));
799 res = new_bd_ia32_LdTls(NULL, block, mode_Iu);
804 ir_node *gen_Unknown(ir_node *node)
806 ir_mode *mode = get_irn_mode(node);
807 ir_graph *irg = current_ir_graph;
808 dbg_info *dbgi = get_irn_dbg_info(node);
809 ir_node *block = get_irg_start_block(irg);
812 if (mode_is_float(mode)) {
813 if (ia32_cg_config.use_sse2) {
814 res = new_bd_ia32_xUnknown(dbgi, block);
816 res = new_bd_ia32_vfldz(dbgi, block);
818 } else if (ia32_mode_needs_gp_reg(mode)) {
819 res = new_bd_ia32_Unknown(dbgi, block);
821 panic("unsupported Unknown-Mode");
824 be_dep_on_frame(res);
828 const arch_register_req_t *make_register_req(const constraint_t *constraint,
829 int n_outs, const arch_register_req_t **out_reqs, int pos)
831 struct obstack *obst = get_irg_obstack(current_ir_graph);
832 int same_as = constraint->same_as;
833 arch_register_req_t *req;
836 const arch_register_req_t *other_constr;
838 if (same_as >= n_outs)
839 panic("invalid output number in same_as constraint");
841 other_constr = out_reqs[same_as];
843 req = obstack_alloc(obst, sizeof(req[0]));
844 *req = *other_constr;
845 req->type |= arch_register_req_type_should_be_same;
846 req->other_same = 1U << pos;
848 /* switch constraints. This is because in firm we have same_as
849 * constraints on the output constraints while in the gcc asm syntax
850 * they are specified on the input constraints */
851 out_reqs[same_as] = req;
855 /* pure memory ops */
856 if (constraint->cls == NULL) {
857 return &no_register_req;
860 if (constraint->allowed_registers != 0
861 && !constraint->all_registers_allowed) {
862 unsigned *limited_ptr;
864 req = obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
865 memset(req, 0, sizeof(req[0]));
866 limited_ptr = (unsigned*) (req+1);
868 req->type = arch_register_req_type_limited;
869 *limited_ptr = constraint->allowed_registers;
870 req->limited = limited_ptr;
872 req = obstack_alloc(obst, sizeof(req[0]));
873 memset(req, 0, sizeof(req[0]));
874 req->type = arch_register_req_type_normal;
876 req->cls = constraint->cls;
881 const arch_register_req_t *parse_clobber(const char *clobber)
883 struct obstack *obst = get_irg_obstack(current_ir_graph);
884 const arch_register_t *reg = ia32_get_clobber_register(clobber);
885 arch_register_req_t *req;
889 panic("Register '%s' mentioned in asm clobber is unknown", clobber);
892 assert(reg->index < 32);
894 limited = obstack_alloc(obst, sizeof(limited[0]));
895 *limited = 1 << reg->index;
897 req = obstack_alloc(obst, sizeof(req[0]));
898 memset(req, 0, sizeof(req[0]));
899 req->type = arch_register_req_type_limited;
900 req->cls = arch_register_get_class(reg);
901 req->limited = limited;
907 int prevents_AM(ir_node *const block, ir_node *const am_candidate,
908 ir_node *const other)
910 if (get_nodes_block(other) != block)
913 if (is_Sync(other)) {
916 for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) {
917 ir_node *const pred = get_Sync_pred(other, i);
919 if (get_nodes_block(pred) != block)
922 /* Do not block ourselves from getting eaten */
923 if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate)
926 if (!heights_reachable_in_block(heights, pred, am_candidate))
934 /* Do not block ourselves from getting eaten */
935 if (is_Proj(other) && get_Proj_pred(other) == am_candidate)
938 if (!heights_reachable_in_block(heights, other, am_candidate))
945 ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type)
948 ir_entity *symconst_ent = NULL;
950 ir_node *cnst = NULL;
951 ir_node *symconst = NULL;
954 mode = get_irn_mode(node);
955 if (!mode_is_int(mode) && !mode_is_reference(mode)) {
959 if (is_Const(node)) {
962 } else if (is_Global(node)) {
965 } else if (is_Add(node)) {
966 ir_node *left = get_Add_left(node);
967 ir_node *right = get_Add_right(node);
968 if (is_Const(left) && is_Global(right)) {
971 } else if (is_Global(left) && is_Const(right)) {
980 tarval *offset = get_Const_tarval(cnst);
981 if (!tarval_is_long(offset)) {
982 ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst);
986 val = get_tarval_long(offset);
987 if (!check_immediate_constraint(val, immediate_constraint_type))
990 if (symconst != NULL) {
991 if (immediate_constraint_type != 0) {
992 /* we need full 32bits for symconsts */
996 symconst_ent = get_Global_entity(symconst);
998 if (cnst == NULL && symconst == NULL)
1001 new_node = ia32_create_Immediate(symconst_ent, 0, val);