2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the common parts of IR transformation from
23 * firm into ia32-Firm.
24 * @author Matthias Braun, Sebastian Buchwald
25 * @version $Id: ia32_common_transform.c 21012 2008-08-06 13:35:17Z beck $
36 #include "../betranshlp.h"
40 #include "ia32_architecture.h"
41 #include "ia32_common_transform.h"
42 #include "ia32_new_nodes.h"
44 #include "gen_ia32_new_nodes.h"
45 #include "gen_ia32_regalloc_if.h"
47 ir_heights_t *ia32_heights = NULL;
/* Return non-zero iff @p val fits the GCC-style x86 immediate constraint
 * letter @p immediate_constraint_type.  The ranges match GCC's documented
 * ia32 machine constraints ('I'..'O').
 * NOTE(review): this chunk is fragmentary — braces, some case labels
 * (presumably 0/'i'/'n' meaning "any immediate") and the function tail are
 * elided; the stray leading numbers are residue of the original line
 * numbering. */
49 static int check_immediate_constraint(long val, char immediate_constraint_type)
51 switch (immediate_constraint_type) {
/* 'I': 0..31 (5-bit unsigned) */
55 case 'I': return 0 <= val && val <= 31;
/* 'J': 0..63 (6-bit unsigned) */
56 case 'J': return 0 <= val && val <= 63;
/* 'K': signed 8-bit */
57 case 'K': return -128 <= val && val <= 127;
/* 'L': exactly 0xff or 0xffff */
58 case 'L': return val == 0xff || val == 0xffff;
59 case 'M': return 0 <= val && val <= 3;
/* 'N': unsigned 8-bit */
60 case 'N': return 0 <= val && val <= 255;
61 case 'O': return 0 <= val && val <= 127;
63 default: panic("Invalid immediate constraint found");
68 * Get a primitive type for a mode with alignment 16.
/* Return a (cached) primitive type for @p mode.  Types are memoized in the
 * @p types pmap keyed by mode; modes of >= 80 bits (x87 long double) get
 * 16-byte alignment.
 * NOTE(review): fragmentary — the "res == NULL" cache-miss guard and the
 * return are on elided lines. */
70 static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode)
72 ir_type *res = (ir_type*)pmap_get(types, mode);
76 res = new_type_primitive(mode);
77 if (get_mode_size_bits(mode) >= 80) {
78 set_type_alignment_bytes(res, 16);
80 pmap_insert(types, mode, res);
/* Create (or fetch from the per-isa cache isa->tv_ent) a read-only global
 * entity holding the float constant of Const node @p cnst, so the backend
 * can load it from memory.
 * NOTE(review): fragmentary — the cache-hit early return and several braces
 * are elided. */
84 ir_entity *ia32_create_float_const_entity(ir_node *cnst)
86 ir_graph *irg = get_irn_irg(cnst);
87 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
/* downcast: the ia32 backend's arch_env embeds/IS the ia32_isa_t */
88 ia32_isa_t *isa = (ia32_isa_t*) arch_env;
89 ir_tarval *tv = get_Const_tarval(cnst);
90 ir_entity *res = (ir_entity*)pmap_get(isa->tv_ent, tv);
91 ir_initializer_t *initializer;
98 mode = get_tarval_mode(tv);
/* with the x87 unit every load extends to 80 bit anyway, so narrow the
 * stored constant when the value survives the conversion losslessly */
100 if (! ia32_cg_config.use_sse2) {
101 /* try to reduce the mode to produce smaller sized entities */
102 if (mode != mode_F) {
103 if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
/* NOTE(review): an elided line presumably reassigns mode (to mode_F)
 * before this conversion — verify against the full source */
105 tv = tarval_convert_to(tv, mode);
106 } else if (mode != mode_D) {
107 if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
/* NOTE(review): same as above, presumably mode = mode_D on an elided line */
109 tv = tarval_convert_to(tv, mode);
/* build a fresh private, constant global entity with a unique name */
115 tp = ia32_get_prim_type(isa->types, mode);
116 res = new_entity(get_glob_type(), id_unique("C%u"), tp);
117 set_entity_ld_ident(res, get_entity_ident(res));
118 set_entity_visibility(res, ir_visibility_private);
119 add_entity_linkage(res, IR_LINKAGE_CONSTANT);
121 initializer = create_initializer_tarval(tv);
122 set_entity_initializer(res, initializer);
/* memoize so identical tarvals share one entity */
124 pmap_insert(isa->tv_ent, tv, res);
/* Build an ia32 Immediate node (optional symconst entity +/- long offset)
 * in the start block of the current graph and pin it to the virtual
 * GP_NOREG register.
 * NOTE(review): the return statement is on an elided line. */
128 ir_node *ia32_create_Immediate(ir_entity *symconst, int symconst_sign, long val)
130 ir_graph *irg = current_ir_graph;
131 ir_node *start_block = get_irg_start_block(irg);
132 ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
133 symconst_sign, ia32_no_pic_adjust, val);
134 arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);
/* Resolve an asm clobber name (e.g. "eax" or "ax"/"al") to the ia32
 * register it denotes, by linear search over all register classes.
 * For the GP class, a match on name+1 also hits so that 16/8-bit aliases
 * ("ax" vs "eax") resolve to the 32-bit register.
 * NOTE(review): fragmentary — loop-variable declarations, the match body
 * and the return are elided. */
139 const arch_register_t *ia32_get_clobber_register(const char *clobber)
141 const arch_register_t *reg = NULL;
144 const arch_register_class_t *cls;
146 /* TODO: construct a hashmap instead of doing linear search for clobber
148 for (c = 0; c < N_IA32_CLASSES; ++c) {
149 cls = & ia32_reg_classes[c];
150 for (r = 0; r < cls->n_regs; ++r) {
151 const arch_register_t *temp_reg = arch_register_for_index(cls, r);
/* name+1 skips the leading 'e' of GP register names ("eax" -> "ax") */
152 if (strcmp(temp_reg->name, clobber) == 0
153 || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
/* Return non-zero iff a value of @p mode must live in a general-purpose
 * register: int/reference/boolean modes of at most 32 bits, excluding the
 * x87 control-word mode.
 * NOTE(review): the early "return 0" bodies of the two guards are on
 * elided lines. */
165 int ia32_mode_needs_gp_reg(ir_mode *mode)
167 if (mode == ia32_mode_fpcw)
169 if (get_mode_size_bits(mode) > 32)
171 return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
/* Parse one GCC inline-asm constraint string @p c into @p constraint:
 * which register class, which concrete registers (bitmask), whether any
 * register / memory / an immediate is acceptable, and matching ("same as")
 * input/output pairing.
 * NOTE(review): heavily fragmentary — the switch statement and its case
 * labels are elided, so each "assert/cls/limited" triple below is the body
 * of one constraint-letter case (the letters are inferred, marked below;
 * confirm against the full source). */
174 static void parse_asm_constraints(constraint_t *constraint, const char *c,
177 char immediate_type = '\0';
178 unsigned limited = 0;
179 const arch_register_class_t *cls = NULL;
180 int memory_possible = 0;
181 int all_registers_allowed = 0;
185 memset(constraint, 0, sizeof(constraint[0]));
/* -1 == "no matching constraint" */
186 constraint->same_as = -1;
189 /* a memory constraint: no need to do anything in backend about it
190 * (the dependencies are already respected by the memory edge of
195 /* TODO: improve error messages with node and source info. (As users can
196 * easily hit these) */
204 /* Skip out/in-out marker */
214 while (*c != 0 && *c != ',')
/* presumably case 'a': force EAX */
219 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
220 cls = &ia32_reg_classes[CLASS_ia32_gp];
221 limited |= 1 << REG_GP_EAX;
/* presumably case 'b': force EBX */
224 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
225 cls = &ia32_reg_classes[CLASS_ia32_gp];
226 limited |= 1 << REG_GP_EBX;
/* presumably case 'c': force ECX */
229 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
230 cls = &ia32_reg_classes[CLASS_ia32_gp];
231 limited |= 1 << REG_GP_ECX;
/* presumably case 'd': force EDX */
234 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
235 cls = &ia32_reg_classes[CLASS_ia32_gp];
236 limited |= 1 << REG_GP_EDX;
/* presumably case 'D': force EDI */
239 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
240 cls = &ia32_reg_classes[CLASS_ia32_gp];
241 limited |= 1 << REG_GP_EDI;
/* presumably case 'S': force ESI */
244 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
245 cls = &ia32_reg_classes[CLASS_ia32_gp];
246 limited |= 1 << REG_GP_ESI;
/* presumably case 'Q'/'q': byte-addressable GP registers */
250 /* q means lower part of the regs only, this makes no
251 * difference to Q for us (we only assign whole registers) */
252 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
253 cls = &ia32_reg_classes[CLASS_ia32_gp];
254 limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
/* presumably case 'A': EAX:EDX pair */
258 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
259 cls = &ia32_reg_classes[CLASS_ia32_gp];
260 limited |= 1 << REG_GP_EAX | 1 << REG_GP_EDX;
/* presumably case 'l': "index register", i.e. any GP except ESP/EBP */
263 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
264 cls = &ia32_reg_classes[CLASS_ia32_gp];
265 limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
266 1 << REG_GP_EDX | 1 << REG_GP_ESI | 1 << REG_GP_EDI |
/* presumably case 'R'/'r': any GP register */
273 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
274 panic("multiple register classes not supported");
275 cls = &ia32_reg_classes[CLASS_ia32_gp];
276 all_registers_allowed = 1;
/* presumably case 'f'/'t'/'u': x87 (virtual fp) registers */
282 /* TODO: mark values so the x87 simulator knows about t and u */
283 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_vfp])
284 panic("multiple register classes not supported");
285 cls = &ia32_reg_classes[CLASS_ia32_vfp];
286 all_registers_allowed = 1;
/* presumably case 'x': SSE (xmm) registers */
291 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_xmm])
/* NOTE(review): typo "supproted" in this panic message — a doc pass must
 * not touch runtime strings; fix ("supported") in a code change */
292 panic("multiple register classes not supproted");
293 cls = &ia32_reg_classes[CLASS_ia32_xmm];
294 all_registers_allowed = 1;
/* presumably one of the immediate-range cases ('I'..'O'/'K'/'M'/'N'...):
 * record the letter as immediate_type (elided below) */
304 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
305 panic("multiple register classes not supported");
306 if (immediate_type != '\0')
307 panic("multiple immediate types not supported");
308 cls = &ia32_reg_classes[CLASS_ia32_gp];
/* presumably case 'i'/'n': any integer immediate */
313 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
314 panic("multiple register classes not supported");
315 if (immediate_type != '\0')
316 panic("multiple immediate types not supported");
317 cls = &ia32_reg_classes[CLASS_ia32_gp];
318 immediate_type = 'i';
/* presumably case 'g': general operand — register or immediate */
323 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
324 panic("multiple register classes not supported");
325 if (immediate_type != '\0')
326 panic("multiple immediate types not supported");
327 immediate_type = 'i';
328 cls = &ia32_reg_classes[CLASS_ia32_gp];
329 all_registers_allowed = 1;
/* presumably case '0'..'9': matching ("same as output N") constraint,
 * only legal on inputs */
344 panic("can only specify same constraint on input");
346 sscanf(c, "%d%n", &same_as, &p);
356 /* memory constraint no need to do anything in backend about it
357 * (the dependencies are already respected by the memory edge of
362 case 'E': /* no float consts yet */
363 case 'F': /* no float consts yet */
364 case 's': /* makes no sense on x86 */
365 case '<': /* no autodecrement on x86 */
366 case '>': /* no autoincrement on x86 */
367 case 'C': /* sse constant not supported yet */
368 case 'G': /* 80387 constant not supported yet */
369 case 'y': /* we don't support mmx registers yet */
370 case 'Z': /* not available in 32 bit mode */
371 case 'e': /* not available in 32 bit mode */
372 panic("unsupported asm constraint '%c' found in (%+F)",
373 *c, current_ir_graph);
375 /* default: unknown letter */
376 panic("unknown asm constraint '%c' found in (%+F)", *c,
/* a matching constraint excludes any explicit class/immediate spec */
385 panic("same as and register constraint not supported");
386 if (immediate_type != '\0')
387 panic("same as and immediate constraint not supported");
390 if (cls == NULL && same_as < 0) {
391 if (!memory_possible)
392 panic("no constraint specified for assembler input");
/* publish the parse result */
395 constraint->same_as = same_as;
396 constraint->cls = cls;
397 constraint->allowed_registers = limited;
398 constraint->all_registers_allowed = all_registers_allowed;
399 constraint->memory_possible = memory_possible;
400 constraint->immediate_type = immediate_type;
/* Return whether an input requirement and an output requirement could be
 * satisfied by the same register: same class, and — when both are limited
 * to specific registers — overlapping register bitmasks.
 * NOTE(review): the early "return false"/"return true" bodies between the
 * guards are on elided lines. */
403 static bool can_match(const arch_register_req_t *in,
404 const arch_register_req_t *out)
406 if (in->cls != out->cls)
/* if either side accepts any register of the class, they trivially match */
408 if ( (in->type & arch_register_req_type_limited) == 0
409 || (out->type & arch_register_req_type_limited) == 0 )
412 return (*in->limited & *out->limited) != 0;
/* Fetch the transformed counterpart of @p node; both visible branches call
 * be_transform_node, the distinction between transformer kinds is on the
 * elided lines (presumably an #ifdef'd DUMP/old-transformer path —
 * TODO confirm). */
415 static inline ir_node *get_new_node(ir_node *node)
418 if (be_transformer == TRANSFORMER_DEFAULT) {
419 return be_transform_node(node);
424 return be_transform_node(node);
/* Transform a firm ASM node into an ia32 Asm node: parse all input/output
 * constraints and clobbers into register requirements, build the
 * register_map that ties template operand positions to node in/outs, and
 * add dummy ins/outs so the node is (approximately) register-pressure
 * faithful.
 * NOTE(review): heavily fragmentary — many declarations, braces and
 * continue/break lines are elided. */
428 ir_node *ia32_gen_ASM(ir_node *node)
430 ir_node *block = get_nodes_block(node);
431 ir_node *new_block = get_new_node(block);
432 dbg_info *dbgi = get_irn_dbg_info(node);
439 int n_out_constraints;
441 const arch_register_req_t **out_reg_reqs;
442 const arch_register_req_t **in_reg_reqs;
443 ia32_asm_reg_t *register_map;
444 unsigned reg_map_size = 0;
445 struct obstack *obst;
446 const ir_asm_constraint *in_constraints;
447 const ir_asm_constraint *out_constraints;
449 int clobbers_flags = 0;
/* one register bitmask of clobbered regs per register class */
450 unsigned clobber_bits[N_IA32_CLASSES];
452 backend_info_t *info;
454 memset(&clobber_bits, 0, sizeof(clobber_bits));
456 /* workaround for lots of buggy code out there as most people think volatile
457 * asm is enough for everything and forget the flags (linux kernel, etc.)
459 if (get_irn_pinned(node) == op_pin_state_pinned) {
463 arity = get_irn_arity(node);
464 in = ALLOCANZ(ir_node*, arity);
/* first pass over clobbers: collect per-class clobbered-register bits;
 * "memory" and "cc" are handled specially (bodies elided) */
466 clobbers = get_ASM_clobbers(node);
468 for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
469 const arch_register_req_t *req;
470 const char *c = get_id_str(clobbers[i]);
472 if (strcmp(c, "memory") == 0)
474 if (strcmp(c, "cc") == 0) {
479 req = ia32_parse_clobber(c);
480 clobber_bits[req->cls->index] |= *req->limited;
484 n_out_constraints = get_ASM_n_output_constraints(node);
/* clobbered registers become additional (dummy) outputs */
485 out_arity = n_out_constraints + n_clobbers;
487 in_constraints = get_ASM_input_constraints(node);
488 out_constraints = get_ASM_output_constraints(node);
490 /* determine size of register_map */
491 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
492 const ir_asm_constraint *constraint = &out_constraints[out_idx];
493 if (constraint->pos > reg_map_size)
494 reg_map_size = constraint->pos;
496 for (i = 0; i < arity; ++i) {
497 const ir_asm_constraint *constraint = &in_constraints[i];
498 if (constraint->pos > reg_map_size)
499 reg_map_size = constraint->pos;
/* NOTE(review): an elided line presumably does ++reg_map_size (max pos is
 * inclusive) — verify */
503 obst = get_irg_obstack(current_ir_graph);
504 register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size);
505 memset(register_map, 0, reg_map_size * sizeof(register_map[0]));
507 /* construct output constraints */
508 out_size = out_arity + 1;
509 out_reg_reqs = OALLOCN(obst, const arch_register_req_t*, out_size);
511 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
512 const ir_asm_constraint *constraint = &out_constraints[out_idx];
513 const char *c = get_id_str(constraint->constraint);
514 unsigned pos = constraint->pos;
515 constraint_t parsed_constraint;
516 const arch_register_req_t *req;
/* is_output = 1 */
518 parse_asm_constraints(&parsed_constraint, c, 1);
519 req = ia32_make_register_req(&parsed_constraint, n_out_constraints,
520 out_reg_reqs, out_idx);
521 out_reg_reqs[out_idx] = req;
523 register_map[pos].use_input = 0;
524 register_map[pos].valid = 1;
525 register_map[pos].memory = 0;
526 register_map[pos].inout_pos = out_idx;
527 register_map[pos].mode = constraint->mode;
530 /* inputs + input constraints */
531 in_reg_reqs = OALLOCN(obst, const arch_register_req_t*, arity);
532 for (i = 0; i < arity; ++i) {
533 ir_node *pred = get_irn_n(node, i);
534 const ir_asm_constraint *constraint = &in_constraints[i];
535 ident *constr_id = constraint->constraint;
536 const char *c = get_id_str(constr_id);
537 unsigned pos = constraint->pos;
538 int is_memory_op = 0;
539 ir_node *input = NULL;
540 unsigned r_clobber_bits;
541 constraint_t parsed_constraint;
542 const arch_register_req_t *req;
/* is_output = 0 */
544 parse_asm_constraints(&parsed_constraint, c, 0);
/* inputs must not be allocated into registers this asm clobbers:
 * materialize the allowed set and strip the clobbered bits */
545 if (parsed_constraint.cls != NULL) {
546 r_clobber_bits = clobber_bits[parsed_constraint.cls->index];
547 if (r_clobber_bits != 0) {
548 if (parsed_constraint.all_registers_allowed) {
549 parsed_constraint.all_registers_allowed = 0;
550 be_set_allocatable_regs(current_ir_graph,
551 parsed_constraint.cls,
552 &parsed_constraint.allowed_registers);
554 parsed_constraint.allowed_registers &= ~r_clobber_bits;
558 req = ia32_make_register_req(&parsed_constraint, n_out_constraints,
560 in_reg_reqs[i] = req;
/* try folding a constant operand into an Immediate node */
562 if (parsed_constraint.immediate_type != '\0') {
563 char imm_type = parsed_constraint.immediate_type;
564 input = ia32_try_create_Immediate(pred, imm_type);
/* otherwise transform the predecessor normally */
568 ir_node *pred = get_irn_n(node, i);
569 input = get_new_node(pred);
571 if (parsed_constraint.cls == NULL
572 && parsed_constraint.same_as < 0) {
574 } else if (parsed_constraint.memory_possible) {
575 /* TODO: match Load or Load/Store if memory possible is set */
580 register_map[pos].use_input = 1;
581 register_map[pos].valid = 1;
582 register_map[pos].memory = is_memory_op;
583 register_map[pos].inout_pos = i;
584 register_map[pos].mode = constraint->mode;
/* second pass over clobbers: append one limited output req per clobbered
 * register (skipping "memory"/"cc" again) */
588 for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
589 const char *c = get_id_str(clobbers[i]);
590 const arch_register_req_t *req;
592 if (strcmp(c, "memory") == 0 || strcmp(c, "cc") == 0)
595 req = ia32_parse_clobber(c);
596 out_reg_reqs[out_idx] = req;
600 /* count inputs which are real values (and not memory) */
602 for (i = 0; i < arity; ++i) {
603 ir_node *in = get_irn_n(node, i);
604 if (get_irn_mode(in) == mode_M)
609 /* Attempt to make ASM node register pressure faithful.
610 * (This does not work for complicated cases yet!)
612 * Algorithm: Check if there are fewer inputs or outputs (I will call this
613 * the smaller list). Then try to match each constraint of the smaller list
614 * to 1 of the other list. If we can't match it, then we have to add a dummy
615 * input/output to the other list
617 * FIXME: This is still broken in lots of cases. But at least better than
619 * FIXME: need to do this per register class...
621 if (out_arity <= value_arity) {
622 int orig_arity = arity;
625 bitset_t *used_ins = bitset_alloca(arity);
626 for (o = 0; o < out_arity; ++o) {
628 const arch_register_req_t *outreq = out_reg_reqs[o];
630 if (outreq->cls == NULL) {
634 for (i = 0; i < orig_arity; ++i) {
635 const arch_register_req_t *inreq;
636 if (bitset_is_set(used_ins, i))
638 inreq = in_reg_reqs[i];
639 if (!can_match(outreq, inreq))
641 bitset_set(used_ins, i);
644 /* did we find any match? */
648 /* we might need more space in the input arrays */
649 if (arity >= in_size) {
650 const arch_register_req_t **new_in_reg_reqs;
654 new_in_reg_reqs = OALLOCN(obst, const arch_register_req_t*,
656 memcpy(new_in_reg_reqs, in_reg_reqs, arity * sizeof(new_in_reg_reqs[0]));
657 new_in = ALLOCANZ(ir_node*, in_size);
658 memcpy(new_in, in, arity*sizeof(new_in[0]));
660 in_reg_reqs = new_in_reg_reqs;
664 /* add a new (dummy) input which occupies the register */
665 assert(outreq->type & arch_register_req_type_limited);
666 in_reg_reqs[arity] = outreq;
667 in[arity] = new_bd_ia32_ProduceVal(NULL, block);
668 be_dep_on_frame(in[arity]);
/* mirror case: more outputs than inputs — match each input against an
 * unused output, add dummy outputs for unmatched limited inputs */
673 bitset_t *used_outs = bitset_alloca(out_arity);
674 int orig_out_arity = out_arity;
675 for (i = 0; i < arity; ++i) {
677 const arch_register_req_t *inreq = in_reg_reqs[i];
679 if (inreq->cls == NULL) {
683 for (o = 0; o < orig_out_arity; ++o) {
684 const arch_register_req_t *outreq;
685 if (bitset_is_set(used_outs, o))
687 outreq = out_reg_reqs[o];
688 if (!can_match(outreq, inreq))
/* NOTE(review): this marks index i in used_outs, but the loop scans
 * outputs by o — the symmetric loop above marks used_ins with its inner
 * index. Looks like this should be bitset_set(used_outs, o); verify
 * against upstream libFirm and fix in a code change. */
690 bitset_set(used_outs, i);
693 /* did we find any match? */
694 if (o < orig_out_arity)
697 /* we might need more space in the output arrays */
698 if (out_arity >= out_size) {
699 const arch_register_req_t **new_out_reg_reqs;
703 = OALLOCN(obst, const arch_register_req_t*, out_size);
704 memcpy(new_out_reg_reqs, out_reg_reqs,
705 out_arity * sizeof(new_out_reg_reqs[0]));
706 out_reg_reqs = new_out_reg_reqs;
709 /* add a new (dummy) output which occupies the register */
710 assert(inreq->type & arch_register_req_type_limited);
711 out_reg_reqs[out_arity] = inreq;
716 /* append none register requirement for the memory output */
717 if (out_arity + 1 >= out_size) {
718 const arch_register_req_t **new_out_reg_reqs;
720 out_size = out_arity + 1;
722 = OALLOCN(obst, const arch_register_req_t*, out_size);
723 memcpy(new_out_reg_reqs, out_reg_reqs,
724 out_arity * sizeof(new_out_reg_reqs[0]));
725 out_reg_reqs = new_out_reg_reqs;
728 /* add a new (dummy) output which occupies the register */
729 out_reg_reqs[out_arity] = arch_no_register_req;
/* finally construct the ia32 Asm node and attach the requirements */
732 new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
733 get_ASM_text(node), register_map);
736 be_dep_on_frame(new_node);
738 info = be_get_info(new_node);
739 for (i = 0; i < out_arity; ++i) {
740 info->out_infos[i].req = out_reg_reqs[i];
742 arch_set_in_register_reqs(new_node, in_reg_reqs);
744 SET_IA32_ORIG_NODE(new_node, node);
/* Transform a CopyB (block copy) node: large copies become an ia32 CopyB
 * (REP MOVS with count in ECX), small copies become the immediate variant
 * CopyB_i with an inlined size.
 * NOTE(review): fragmentary — the else branch, the size >>= 2 scaling and
 * the return are on elided lines. */
749 ir_node *ia32_gen_CopyB(ir_node *node)
751 ir_node *block = get_new_node(get_nodes_block(node));
752 ir_node *src = get_CopyB_src(node);
753 ir_node *new_src = get_new_node(src);
754 ir_node *dst = get_CopyB_dst(node);
755 ir_node *new_dst = get_new_node(dst);
756 ir_node *mem = get_CopyB_mem(node);
757 ir_node *new_mem = get_new_node(mem);
759 dbg_info *dbgi = get_irn_dbg_info(node);
760 int size = get_type_size_bytes(get_CopyB_type(node));
763 /* If we have to copy more than 32 bytes, we use REP MOVSx and */
764 /* then we need the size explicitly in ECX. */
/* NOTE(review): comment says 32 bytes but the threshold is 32 * 4 = 128
 * bytes — one of the two should be corrected */
765 if (size >= 32 * 4) {
766 rem = size & 0x3; /* size % 4 */
/* dword count goes into an ia32 Const (feeds ECX) */
769 res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size);
770 be_dep_on_frame(res);
772 res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
775 ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
778 res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
781 SET_IA32_ORIG_NODE(res, node);
/* Transform the TLS base-pointer Proj into an ia32 LdTls node (loads the
 * thread-local-storage base as a 32-bit unsigned value).
 * NOTE(review): the return is on an elided line. */
786 ir_node *ia32_gen_Proj_tls(ir_node *node)
788 ir_node *block = get_new_node(get_nodes_block(node));
791 res = new_bd_ia32_LdTls(NULL, block, mode_Iu);
/* Transform an Unknown node into a backend placeholder value in the start
 * block: xUnknown for SSE floats, vfldz (load +0.0) for x87 floats,
 * Unknown for GP-register modes; any other mode is an error.
 * NOTE(review): braces and the return are on elided lines. */
796 ir_node *ia32_gen_Unknown(ir_node *node)
798 ir_mode *mode = get_irn_mode(node);
799 ir_graph *irg = current_ir_graph;
800 dbg_info *dbgi = get_irn_dbg_info(node);
801 ir_node *block = get_irg_start_block(irg);
804 if (mode_is_float(mode)) {
805 if (ia32_cg_config.use_sse2) {
806 res = new_bd_ia32_xUnknown(dbgi, block);
/* x87 path: materialize zero instead of a true "don't care" */
808 res = new_bd_ia32_vfldz(dbgi, block);
810 } else if (ia32_mode_needs_gp_reg(mode)) {
811 res = new_bd_ia32_Unknown(dbgi, block);
813 panic("unsupported Unknown-Mode");
816 be_dep_on_frame(res);
/* Turn a parsed constraint into an arch_register_req_t allocated on the
 * graph's obstack.  Handles three cases: a "same as output N" matching
 * constraint (which is swapped onto the output side), a pure memory
 * operand (no register needed), and a normal/limited register requirement.
 * NOTE(review): fragmentary — several returns and braces are elided. */
820 const arch_register_req_t *ia32_make_register_req(const constraint_t *constraint,
821 int n_outs, const arch_register_req_t **out_reqs, int pos)
823 struct obstack *obst = get_irg_obstack(current_ir_graph);
824 int same_as = constraint->same_as;
825 arch_register_req_t *req;
828 const arch_register_req_t *other_constr;
830 if (same_as >= n_outs)
831 panic("invalid output number in same_as constraint");
833 other_constr = out_reqs[same_as];
/* copy the referenced output requirement and add should-be-same on pos */
835 req = OALLOC(obst, arch_register_req_t);
836 *req = *other_constr;
837 req->type |= arch_register_req_type_should_be_same;
838 req->other_same = 1U << pos;
841 /* switch constraints. This is because in firm we have same_as
842 * constraints on the output constraints while in the gcc asm syntax
843 * they are specified on the input constraints */
844 out_reqs[same_as] = req;
848 /* pure memory ops */
849 if (constraint->cls == NULL) {
850 return arch_no_register_req;
/* limited set of registers: allocate req + trailing bitmask in one shot */
853 if (constraint->allowed_registers != 0
854 && !constraint->all_registers_allowed) {
855 unsigned *limited_ptr;
857 req = (arch_register_req_t*)obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
858 memset(req, 0, sizeof(req[0]));
/* the limited bitmask lives directly behind the req struct */
859 limited_ptr = (unsigned*) (req+1);
861 req->type = arch_register_req_type_limited;
862 *limited_ptr = constraint->allowed_registers;
863 req->limited = limited_ptr;
/* else: any register of the class */
865 req = OALLOCZ(obst, arch_register_req_t);
866 req->type = arch_register_req_type_normal;
868 req->cls = constraint->cls;
/* Build a register requirement limited to exactly the register named by
 * asm clobber string @p clobber; panics if the name is unknown.
 * NOTE(review): the NULL-check guard around the panic, the 'limited'
 * declaration and the return are on elided lines. */
874 const arch_register_req_t *ia32_parse_clobber(const char *clobber)
876 struct obstack *obst = get_irg_obstack(current_ir_graph);
877 const arch_register_t *reg = ia32_get_clobber_register(clobber);
878 arch_register_req_t *req;
882 panic("Register '%s' mentioned in asm clobber is unknown", clobber);
/* the limited bitmask is a single unsigned, so indices must fit 32 bits */
885 assert(reg->index < 32);
887 limited = OALLOC(obst, unsigned);
888 *limited = 1 << reg->index;
890 req = OALLOCZ(obst, arch_register_req_t);
891 req->type = arch_register_req_type_limited;
892 req->cls = arch_register_get_class(reg);
893 req->limited = limited;
/* Decide whether node @p other prevents @p am_candidate from being folded
 * into an address-mode operand within @p block: it does when it lies in the
 * same block and am_candidate is reachable from it (a scheduling
 * dependency), unless it is merely a Proj of the candidate itself.
 * A Sync is checked predecessor-by-predecessor.
 * NOTE(review): fragmentary — the early returns inside each guard and the
 * final returns are on elided lines. */
900 int ia32_prevents_AM(ir_node *const block, ir_node *const am_candidate,
901 ir_node *const other)
903 if (get_nodes_block(other) != block)
906 if (is_Sync(other)) {
909 for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) {
910 ir_node *const pred = get_Sync_pred(other, i);
912 if (get_nodes_block(pred) != block)
915 /* Do not block ourselves from getting eaten */
916 if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate)
/* uses the global ia32_heights reachability oracle */
919 if (!heights_reachable_in_block(ia32_heights, pred, am_candidate))
927 /* Do not block ourselves from getting eaten */
928 if (is_Proj(other) && get_Proj_pred(other) == am_candidate)
931 if (!heights_reachable_in_block(ia32_heights, other, am_candidate))
/* Try to express @p node (a Const, a Global address, or an Add of the two)
 * as an ia32 Immediate satisfying constraint letter
 * @p immediate_constraint_type; the visible tail builds the Immediate, the
 * NULL-on-failure paths are on elided lines.
 * NOTE(review): fragmentary — the assignments of cnst/symconst in each
 * classification branch, several returns and the function end (beyond the
 * visible chunk) are elided. */
938 ir_node *ia32_try_create_Immediate(ir_node *node, char immediate_constraint_type)
941 ir_entity *symconst_ent = NULL;
943 ir_node *cnst = NULL;
944 ir_node *symconst = NULL;
/* only integer/pointer values can become immediates */
947 mode = get_irn_mode(node);
948 if (!mode_is_int(mode) && !mode_is_reference(mode)) {
/* classify the node shape: plain constant, plain address, or addr+offset */
952 if (is_Const(node)) {
955 } else if (is_Global(node)) {
958 } else if (is_Add(node)) {
959 ir_node *left = get_Add_left(node);
960 ir_node *right = get_Add_right(node);
961 if (is_Const(left) && is_Global(right)) {
964 } else if (is_Global(left) && is_Const(right)) {
/* validate the constant part against the requested range */
973 ir_tarval *offset = get_Const_tarval(cnst);
974 if (!tarval_is_long(offset)) {
975 ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst);
979 val = get_tarval_long(offset);
980 if (!check_immediate_constraint(val, immediate_constraint_type))
983 if (symconst != NULL) {
/* symbolic addresses are only representable in an unconstrained (full
 * 32-bit) immediate */
984 if (immediate_constraint_type != 0) {
985 /* we need full 32bits for symconsts */
989 symconst_ent = get_Global_entity(symconst);
991 if (cnst == NULL && symconst == NULL)
994 new_node = ia32_create_Immediate(symconst_ent, 0, val);