2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the common parts of IR transformation from
23 * firm into ia32-Firm.
24 * @author Matthias Braun, Sebastian Buchwald
25 * @version $Id: ia32_common_transform.c 21012 2008-08-06 13:35:17Z beck $
35 #include "../betranshlp.h"
39 #include "ia32_architecture.h"
40 #include "ia32_common_transform.h"
41 #include "ia32_new_nodes.h"
43 #include "gen_ia32_new_nodes.h"
44 #include "gen_ia32_regalloc_if.h"
46 /** hold the current code generator during transformation */
47 ia32_code_gen_t *env_cg = NULL;
/** Dominance-heights information of the current graph; consulted by
 *  prevents_AM() below when deciding whether a node may be folded into an
 *  address mode. */
49 heights_t *heights = NULL;
/** Register requirement meaning "needs no register at all"; handed out by
 *  make_register_req() below for pure memory operands.
 *  NOTE(review): the remaining initializer fields appear to be cut off in
 *  this excerpt. */
51 static const arch_register_req_t no_register_req = {
52 arch_register_req_type_none,
54 NULL, /* limit bitset */
/**
 * Test whether a constant fulfills an x86 inline-asm immediate constraint.
 *
 * @param val                        the constant value to test
 * @param immediate_constraint_type  the GCC constraint letter ('I'...'O')
 * @return non-zero if @p val lies in the range permitted by the letter
 *
 * NOTE(review): additional constraint cases may be cut off in this excerpt;
 * unknown letters fall through to the panic below.
 */
59 static int check_immediate_constraint(long val, char immediate_constraint_type)
61 switch (immediate_constraint_type) {
65 case 'I': return 0 <= val && val <= 31;
66 case 'J': return 0 <= val && val <= 63;
67 case 'K': return -128 <= val && val <= 127;
68 case 'L': return val == 0xff || val == 0xffff;
69 case 'M': return 0 <= val && val <= 3;
70 case 'N': return 0 <= val && val <= 255;
71 case 'O': return 0 <= val && val <= 127;
73 default: panic("Invalid immediate constraint found");
77 /* creates a unique ident by adding a number to a tag */
/**
 * @param tag  printf-style format string with one unsigned placeholder
 *             (callers pass e.g. ".LC%u"); a monotonically increasing
 *             counter is substituted to make the name unique
 * @return a fresh ident built from the expanded tag
 *
 * Not thread-safe: uses a function-local static counter.
 */
78 ident *ia32_unique_id(const char *tag)
80 static unsigned id = 0;
/* tag itself is the format string; the counter is pre-incremented so the
 * first generated id uses the value 1 */
83 snprintf(str, sizeof(str), tag, ++id);
84 return new_id_from_str(str);
88 * Get a primitive type for a mode with alignment 16.
/**
 * Results are cached in @p types (a mode -> ir_type map); an existing entry
 * is looked up first, otherwise a new primitive type is created and
 * inserted.  Only modes of 80 bits or more get the 16-byte alignment
 * (presumably for x87 extended-precision spill slots — TODO confirm).
 */
90 static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode)
92 pmap_entry *e = pmap_find(types, mode);
96 res = new_type_primitive(mode);
97 if (get_mode_size_bits(mode) >= 80) {
98 set_type_alignment_bytes(res, 16);
100 pmap_insert(types, mode, res);
/**
 * Create (or look up) a read-only global entity holding the value of a
 * float Const node, so the backend can load it from memory.
 *
 * Entities are cached in isa->tv_ent keyed by the tarval, so the same
 * constant is emitted only once.  In the non-SSE (x87) case the tarval is
 * narrowed to mode_F or mode_D when the conversion is lossless, producing a
 * smaller entity.
 *
 * @param cnst  a Const node with a floating point tarval
 * @return the (possibly shared) entity containing the constant
 */
107 ir_entity *create_float_const_entity(ir_node *cnst)
109 ia32_isa_t *isa = env_cg->isa;
110 tarval *key = get_Const_tarval(cnst);
/* cache lookup: reuse an entity created for the same tarval earlier */
111 pmap_entry *e = pmap_find(isa->tv_ent, key);
117 ir_mode *mode = get_tarval_mode(tv);
120 if (! ia32_cg_config.use_sse2) {
121 /* try to reduce the mode to produce smaller sized entities */
122 if (mode != mode_F) {
123 if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
125 tv = tarval_convert_to(tv, mode);
126 } else if (mode != mode_D) {
127 if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
129 tv = tarval_convert_to(tv, mode);
135 if (mode == get_irn_mode(cnst)) {
136 /* mode was not changed */
137 tp = get_Const_type(cnst);
/* fall back to a generated primitive type if the Const has none */
138 if (tp == firm_unknown_type)
139 tp = ia32_get_prim_type(isa->types, mode);
141 tp = ia32_get_prim_type(isa->types, mode);
/* local, constant entity with a unique ".LC<n>" label */
143 res = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp);
145 set_entity_ld_ident(res, get_entity_ident(res));
146 set_entity_visibility(res, ir_visibility_local);
147 add_entity_linkage(res, IR_LINKAGE_CONSTANT);
149 /* we create a new entity here: It's initialization must resist on the
/* initializer nodes must live in the const-code irg, so temporarily
 * switch current_ir_graph */
151 rem = current_ir_graph;
152 current_ir_graph = get_const_code_irg();
153 set_atomic_ent_value(res, new_Const_type(tv, tp));
154 current_ir_graph = rem;
/* remember the entity for future lookups of the same tarval */
156 pmap_insert(isa->tv_ent, key, res);
/**
 * Create an ia32 Immediate node in the start block of the current graph.
 *
 * @param symconst       entity referenced by the immediate (may be NULL)
 * @param symconst_sign  sign applied to the symconst part
 * @param val            the constant offset value
 *
 * The node is assigned the gp NOREG register since an immediate occupies
 * no real register.
 */
164 ir_node *ia32_create_Immediate(ir_entity *symconst, int symconst_sign, long val)
166 ir_graph *irg = current_ir_graph;
167 ir_node *start_block = get_irg_start_block(irg);
168 ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
169 symconst_sign, no_pic_adjust, val);
170 arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
/**
 * Map a clobber name from an asm statement to the ia32 register it names.
 *
 * Performs a linear search over all register classes.  For the gp class the
 * name is additionally matched with its first character stripped, so e.g.
 * the clobber "ax" also matches the register named "eax".
 *
 * @param clobber  the clobber string from the asm statement
 * @return the matching register, or NULL if none matches (initial value;
 *         the assignment on match is cut off in this excerpt)
 */
175 const arch_register_t *ia32_get_clobber_register(const char *clobber)
177 const arch_register_t *reg = NULL;
180 const arch_register_class_t *cls;
182 /* TODO: construct a hashmap instead of doing linear search for clobber
184 for (c = 0; c < N_CLASSES; ++c) {
185 cls = & ia32_reg_classes[c];
186 for (r = 0; r < cls->n_regs; ++r) {
187 const arch_register_t *temp_reg = arch_register_for_index(cls, r);
188 if (strcmp(temp_reg->name, clobber) == 0
189 || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
/**
 * Return non-zero if values of the given mode are held in gp registers:
 * integer, reference and mode_b values.  mode_fpcw and modes wider than
 * 32 bits are handled by the early-out branches (their return statements
 * are cut off in this excerpt; presumably they return 0 — TODO confirm).
 */
201 int ia32_mode_needs_gp_reg(ir_mode *mode)
203 if (mode == mode_fpcw)
205 if (get_mode_size_bits(mode) > 32)
207 return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
/**
 * Parse a GCC inline-asm constraint string into a constraint_t.
 *
 * Walks the string character by character, accumulating:
 *  - cls:                   the single allowed register class (mixing
 *                           classes panics)
 *  - limited:               a bitset of specifically allowed registers
 *                           (for a/b/c/d/S/D/q/A/Q style letters)
 *  - all_registers_allowed: set for class-wide letters (r, x, t/u, ...)
 *  - immediate_type:        set for immediate letters (i, n, g, ...)
 *  - memory_possible:       set for memory constraints
 *  - same_as:               matching-constraint digit ("0", "1", ...)
 *
 * The results are written into @p constraint; unsupported or unknown
 * letters panic.
 *
 * @param constraint  output structure, fully overwritten
 * @param c           the constraint string to parse
 * (third parameter name is cut off in this excerpt; judging by the
 *  "same constraint on input" panic it flags input vs. output parsing —
 *  TODO confirm)
 */
210 static void parse_asm_constraints(constraint_t *constraint, const char *c,
213 char immediate_type = '\0';
214 unsigned limited = 0;
215 const arch_register_class_t *cls = NULL;
216 int memory_possible = 0;
217 int all_registers_allowed = 0;
/* start from a clean slate; same_as = -1 means "no matching constraint" */
221 memset(constraint, 0, sizeof(constraint[0]));
222 constraint->same_as = -1;
225 /* a memory constraint: no need to do anything in backend about it
226 * (the dependencies are already respected by the memory edge of
231 /* TODO: improve error messages with node and source info. (As users can
232 * easily hit these) */
240 /* Skip out/in-out marker */
250 while (*c != 0 && *c != ',')
/* single-register letters: pin the gp class and add one register to the
 * limited bitset; each asserts no other class was selected before */
255 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
256 cls = &ia32_reg_classes[CLASS_ia32_gp];
257 limited |= 1 << REG_EAX;
260 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
261 cls = &ia32_reg_classes[CLASS_ia32_gp];
262 limited |= 1 << REG_EBX;
265 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
266 cls = &ia32_reg_classes[CLASS_ia32_gp];
267 limited |= 1 << REG_ECX;
270 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
271 cls = &ia32_reg_classes[CLASS_ia32_gp];
272 limited |= 1 << REG_EDX;
275 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
276 cls = &ia32_reg_classes[CLASS_ia32_gp];
277 limited |= 1 << REG_EDI;
280 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
281 cls = &ia32_reg_classes[CLASS_ia32_gp];
282 limited |= 1 << REG_ESI;
286 /* q means lower part of the regs only, this makes no
287 * difference to Q for us (we only assign whole registers) */
288 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
289 cls = &ia32_reg_classes[CLASS_ia32_gp];
290 limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
294 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
295 cls = &ia32_reg_classes[CLASS_ia32_gp];
296 limited |= 1 << REG_EAX | 1 << REG_EDX;
299 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
300 cls = &ia32_reg_classes[CLASS_ia32_gp];
301 limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
302 1 << REG_EDX | 1 << REG_ESI | 1 << REG_EDI |
/* class-wide letters: any register of the class is acceptable */
309 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
310 panic("multiple register classes not supported");
311 cls = &ia32_reg_classes[CLASS_ia32_gp];
312 all_registers_allowed = 1;
318 /* TODO: mark values so the x87 simulator knows about t and u */
319 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_vfp])
320 panic("multiple register classes not supported");
321 cls = &ia32_reg_classes[CLASS_ia32_vfp];
322 all_registers_allowed = 1;
327 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_xmm])
/* NOTE(review): "supproted" typo in this panic message; fixing it needs a
 * code change outside the scope of this documentation pass */
328 panic("multiple register classes not supproted");
329 cls = &ia32_reg_classes[CLASS_ia32_xmm];
330 all_registers_allowed = 1;
/* immediate letters: remember the immediate type, still gp class */
340 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
341 panic("multiple register classes not supported");
342 if (immediate_type != '\0')
343 panic("multiple immediate types not supported");
344 cls = &ia32_reg_classes[CLASS_ia32_gp];
349 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
350 panic("multiple register classes not supported");
351 if (immediate_type != '\0')
352 panic("multiple immediate types not supported");
353 cls = &ia32_reg_classes[CLASS_ia32_gp];
354 immediate_type = 'i';
359 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
360 panic("multiple register classes not supported");
361 if (immediate_type != '\0')
362 panic("multiple immediate types not supported");
363 immediate_type = 'i';
364 cls = &ia32_reg_classes[CLASS_ia32_gp];
365 all_registers_allowed = 1;
/* matching constraint ("0".."9"): only allowed on inputs */
380 panic("can only specify same constraint on input");
382 sscanf(c, "%d%n", &same_as, &p);
392 /* memory constraint no need to do anything in backend about it
393 * (the dependencies are already respected by the memory edge of
/* constraints that are valid GCC syntax but unsupported here */
398 case 'E': /* no float consts yet */
399 case 'F': /* no float consts yet */
400 case 's': /* makes no sense on x86 */
401 case '<': /* no autodecrement on x86 */
402 case '>': /* no autoincrement on x86 */
403 case 'C': /* sse constant not supported yet */
404 case 'G': /* 80387 constant not supported yet */
405 case 'y': /* we don't support mmx registers yet */
406 case 'Z': /* not available in 32 bit mode */
407 case 'e': /* not available in 32 bit mode */
408 panic("unsupported asm constraint '%c' found in (%+F)",
409 *c, current_ir_graph);
412 panic("unknown asm constraint '%c' found in (%+F)", *c,
/* a matching constraint excludes any register/immediate specification */
421 panic("same as and register constraint not supported");
422 if (immediate_type != '\0')
423 panic("same as and immediate constraint not supported");
426 if (cls == NULL && same_as < 0) {
427 if (!memory_possible)
428 panic("no constraint specified for assembler input");
/* publish the parsed results */
431 constraint->same_as = same_as;
432 constraint->cls = cls;
433 constraint->allowed_registers = limited;
434 constraint->all_registers_allowed = all_registers_allowed;
435 constraint->memory_possible = memory_possible;
436 constraint->immediate_type = immediate_type;
/**
 * Check whether an input and an output register requirement could be
 * satisfied by the same register: the classes must match, and when both
 * requirements are limited their limited bitsets must intersect.
 * (The return statements of the early-out branches are cut off in this
 * excerpt.)
 */
439 static bool can_match(const arch_register_req_t *in,
440 const arch_register_req_t *out)
442 if (in->cls != out->cls)
444 if ( (in->type & arch_register_req_type_limited) == 0
445 || (out->type & arch_register_req_type_limited) == 0 )
/* both limited: any common bit means a shared register is possible */
448 return (*in->limited & *out->limited) != 0;
/**
 * Fetch the transformed counterpart of @p node, dispatching on the active
 * transformer (both visible paths call be_transform_node; the branch for
 * non-default transformers is partly cut off in this excerpt).
 */
451 static inline ir_node *get_new_node(ir_node *node)
454 if (be_transformer == TRANSFORMER_DEFAULT) {
455 return be_transform_node(node);
460 return be_transform_node(node);
/**
 * Transform a firm ASM node into an ia32 Asm node.
 *
 * Steps performed:
 *  1. collect clobbers ("memory"/"cc" handled specially, the rest parsed
 *     into per-class register bitsets)
 *  2. parse output and input constraints into arch_register_req_t's and
 *     fill the ia32 register_map
 *  3. balance inputs against outputs with dummy ProduceVal inputs /
 *     dummy outputs so the node is (approximately) register-pressure
 *     faithful
 *  4. append a none-requirement for the memory output and build the
 *     ia32 Asm node
 *
 * @param node  the firm ASM node to transform
 * @return the new ia32 Asm node
 */
464 ir_node *gen_ASM(ir_node *node)
466 ir_node *block = get_nodes_block(node);
467 ir_node *new_block = get_new_node(block);
468 dbg_info *dbgi = get_irn_dbg_info(node);
475 int n_out_constraints;
477 const arch_register_req_t **out_reg_reqs;
478 const arch_register_req_t **in_reg_reqs;
479 ia32_asm_reg_t *register_map;
480 unsigned reg_map_size = 0;
481 struct obstack *obst;
482 const ir_asm_constraint *in_constraints;
483 const ir_asm_constraint *out_constraints;
485 int clobbers_flags = 0;
/* one bitset of clobbered registers per register class */
486 unsigned clobber_bits[N_CLASSES];
488 backend_info_t *info;
490 memset(&clobber_bits, 0, sizeof(clobber_bits));
492 /* workaround for lots of buggy code out there as most people think volatile
493 * asm is enough for everything and forget the flags (linux kernel, etc.)
495 if (get_irn_pinned(node) == op_pin_state_pinned) {
499 arity = get_irn_arity(node);
500 in = ALLOCANZ(ir_node*, arity);
/* --- step 1: scan the clobber list --- */
502 clobbers = get_ASM_clobbers(node);
504 for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
505 const arch_register_req_t *req;
506 const char *c = get_id_str(clobbers[i]);
/* "memory" and "cc" are not real registers; handled separately */
508 if (strcmp(c, "memory") == 0)
510 if (strcmp(c, "cc") == 0) {
515 req = parse_clobber(c);
516 clobber_bits[req->cls->index] |= *req->limited;
520 n_out_constraints = get_ASM_n_output_constraints(node);
/* clobbered registers become additional (dummy) outputs */
521 out_arity = n_out_constraints + n_clobbers;
523 in_constraints = get_ASM_input_constraints(node);
524 out_constraints = get_ASM_output_constraints(node);
526 /* determine size of register_map */
527 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
528 const ir_asm_constraint *constraint = &out_constraints[out_idx];
529 if (constraint->pos > reg_map_size)
530 reg_map_size = constraint->pos;
532 for (i = 0; i < arity; ++i) {
533 const ir_asm_constraint *constraint = &in_constraints[i];
534 if (constraint->pos > reg_map_size)
535 reg_map_size = constraint->pos;
539 obst = get_irg_obstack(current_ir_graph);
540 register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size);
541 memset(register_map, 0, reg_map_size * sizeof(register_map[0]));
543 /* construct output constraints */
/* +1 reserves room for the trailing memory-output requirement */
544 out_size = out_arity + 1;
545 out_reg_reqs = obstack_alloc(obst, out_size * sizeof(out_reg_reqs[0]));
547 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
548 const ir_asm_constraint *constraint = &out_constraints[out_idx];
549 const char *c = get_id_str(constraint->constraint);
550 unsigned pos = constraint->pos;
551 constraint_t parsed_constraint;
552 const arch_register_req_t *req;
/* third argument 1 = parsing an output constraint */
554 parse_asm_constraints(&parsed_constraint, c, 1);
555 req = make_register_req(&parsed_constraint, n_out_constraints,
556 out_reg_reqs, out_idx);
557 out_reg_reqs[out_idx] = req;
559 register_map[pos].use_input = 0;
560 register_map[pos].valid = 1;
561 register_map[pos].memory = 0;
562 register_map[pos].inout_pos = out_idx;
563 register_map[pos].mode = constraint->mode;
566 /* inputs + input constraints */
567 in_reg_reqs = obstack_alloc(obst, arity * sizeof(in_reg_reqs[0]));
568 for (i = 0; i < arity; ++i) {
569 ir_node *pred = get_irn_n(node, i);
570 const ir_asm_constraint *constraint = &in_constraints[i];
571 ident *constr_id = constraint->constraint;
572 const char *c = get_id_str(constr_id);
573 unsigned pos = constraint->pos;
574 int is_memory_op = 0;
575 ir_node *input = NULL;
576 unsigned r_clobber_bits;
577 constraint_t parsed_constraint;
578 const arch_register_req_t *req;
580 parse_asm_constraints(&parsed_constraint, c, 0);
581 if (parsed_constraint.cls != NULL) {
/* remove clobbered registers from what this input may use */
582 r_clobber_bits = clobber_bits[parsed_constraint.cls->index];
583 if (r_clobber_bits != 0) {
584 if (parsed_constraint.all_registers_allowed) {
585 parsed_constraint.all_registers_allowed = 0;
586 be_abi_set_non_ignore_regs(env_cg->birg->abi,
587 parsed_constraint.cls,
588 &parsed_constraint.allowed_registers);
590 parsed_constraint.allowed_registers &= ~r_clobber_bits;
594 req = make_register_req(&parsed_constraint, n_out_constraints,
596 in_reg_reqs[i] = req;
/* an immediate constraint may let us fold the operand directly */
598 if (parsed_constraint.immediate_type != '\0') {
599 char imm_type = parsed_constraint.immediate_type;
600 input = try_create_Immediate(pred, imm_type);
/* no immediate: transform the predecessor normally */
604 ir_node *pred = get_irn_n(node, i);
605 input = get_new_node(pred);
607 if (parsed_constraint.cls == NULL
608 && parsed_constraint.same_as < 0) {
610 } else if (parsed_constraint.memory_possible) {
611 /* TODO: match Load or Load/Store if memory possible is set */
616 register_map[pos].use_input = 1;
617 register_map[pos].valid = 1;
618 register_map[pos].memory = is_memory_op;
619 register_map[pos].inout_pos = i;
620 register_map[pos].mode = constraint->mode;
/* append the clobber requirements as extra outputs */
624 for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
625 const char *c = get_id_str(clobbers[i]);
626 const arch_register_req_t *req;
628 if (strcmp(c, "memory") == 0 || strcmp(c, "cc") == 0)
631 req = parse_clobber(c);
632 out_reg_reqs[out_idx] = req;
636 /* count inputs which are real values (and not memory) */
638 for (i = 0; i < arity; ++i) {
639 ir_node *in = get_irn_n(node, i);
640 if (get_irn_mode(in) == mode_M)
645 /* Attempt to make ASM node register pressure faithful.
646 * (This does not work for complicated cases yet!)
648 * Algorithm: Check if there are fewer inputs or outputs (I will call this
649 * the smaller list). Then try to match each constraint of the smaller list
650 * to 1 of the other list. If we can't match it, then we have to add a dummy
651 * input/output to the other list
653 * FIXME: This is still broken in lots of cases. But at least better than
655 * FIXME: need to do this per register class...
657 if (out_arity <= value_arity) {
658 int orig_arity = arity;
661 bitset_t *used_ins = bitset_alloca(arity);
662 for (o = 0; o < out_arity; ++o) {
664 const arch_register_req_t *outreq = out_reg_reqs[o];
666 if (outreq->cls == NULL) {
/* greedily match this output against an unused input */
670 for (i = 0; i < orig_arity; ++i) {
671 const arch_register_req_t *inreq;
672 if (bitset_is_set(used_ins, i))
674 inreq = in_reg_reqs[i];
675 if (!can_match(outreq, inreq))
677 bitset_set(used_ins, i);
680 /* did we find any match? */
684 /* we might need more space in the input arrays */
685 if (arity >= in_size) {
686 const arch_register_req_t **new_in_reg_reqs;
691 = obstack_alloc(obst, in_size*sizeof(in_reg_reqs[0]));
692 memcpy(new_in_reg_reqs, in_reg_reqs, arity * sizeof(new_in_reg_reqs[0]));
693 new_in = ALLOCANZ(ir_node*, in_size);
694 memcpy(new_in, in, arity*sizeof(new_in[0]));
696 in_reg_reqs = new_in_reg_reqs;
700 /* add a new (dummy) input which occupies the register */
701 assert(outreq->type & arch_register_req_type_limited);
702 in_reg_reqs[arity] = outreq;
703 in[arity] = new_bd_ia32_ProduceVal(NULL, block);
704 be_dep_on_frame(in[arity]);
/* symmetric case: more inputs than outputs */
709 bitset_t *used_outs = bitset_alloca(out_arity);
710 int orig_out_arity = out_arity;
711 for (i = 0; i < arity; ++i) {
713 const arch_register_req_t *inreq = in_reg_reqs[i];
715 if (inreq->cls == NULL) {
719 for (o = 0; o < orig_out_arity; ++o) {
720 const arch_register_req_t *outreq;
721 if (bitset_is_set(used_outs, o))
723 outreq = out_reg_reqs[o];
724 if (!can_match(outreq, inreq))
/* NOTE(review): bitset_set(used_outs, i) marks index i, but the loop
 * variable here is o — looks suspicious, confirm against upstream */
726 bitset_set(used_outs, i);
729 /* did we find any match? */
730 if (o < orig_out_arity)
733 /* we might need more space in the output arrays */
734 if (out_arity >= out_size) {
735 const arch_register_req_t **new_out_reg_reqs;
739 = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
740 memcpy(new_out_reg_reqs, out_reg_reqs,
741 out_arity * sizeof(new_out_reg_reqs[0]));
742 out_reg_reqs = new_out_reg_reqs;
745 /* add a new (dummy) output which occupies the register */
746 assert(inreq->type & arch_register_req_type_limited);
747 out_reg_reqs[out_arity] = inreq;
752 /* append none register requirement for the memory output */
753 if (out_arity + 1 >= out_size) {
754 const arch_register_req_t **new_out_reg_reqs;
756 out_size = out_arity + 1;
758 = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
759 memcpy(new_out_reg_reqs, out_reg_reqs,
760 out_arity * sizeof(new_out_reg_reqs[0]));
761 out_reg_reqs = new_out_reg_reqs;
764 /* add a new (dummy) output which occupies the register */
765 out_reg_reqs[out_arity] = arch_no_register_req;
/* --- step 4: build the ia32 Asm node and attach the requirements --- */
768 new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
769 get_ASM_text(node), register_map);
772 be_dep_on_frame(new_node);
774 info = be_get_info(new_node);
775 for (i = 0; i < out_arity; ++i) {
776 info->out_infos[i].req = out_reg_reqs[i];
778 set_ia32_in_req_all(new_node, in_reg_reqs);
780 SET_IA32_ORIG_NODE(new_node, node);
/**
 * Transform a firm CopyB (block copy) node into an ia32 CopyB node.
 *
 * For copies of at least 128 bytes (32 * 4) a REP MOVS based CopyB with
 * the word count in a Const (destined for ECX) plus the remainder bytes is
 * emitted; smaller copies use the immediate-size CopyB_i variant.
 */
785 ir_node *gen_CopyB(ir_node *node)
787 ir_node *block = get_new_node(get_nodes_block(node));
788 ir_node *src = get_CopyB_src(node);
789 ir_node *new_src = get_new_node(src);
790 ir_node *dst = get_CopyB_dst(node);
791 ir_node *new_dst = get_new_node(dst);
792 ir_node *mem = get_CopyB_mem(node);
793 ir_node *new_mem = get_new_node(mem);
795 dbg_info *dbgi = get_irn_dbg_info(node);
796 int size = get_type_size_bytes(get_CopyB_type(node));
799 /* If we have to copy more than 32 bytes, we use REP MOVSx and */
800 /* then we need the size explicitly in ECX. */
801 if (size >= 32 * 4) {
802 rem = size & 0x3; /* size % 4 */
/* the (word-)count Const that will end up in ECX */
805 res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size);
806 be_dep_on_frame(res);
808 res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
811 ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
/* small copy: size is encoded as an immediate */
814 res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
817 SET_IA32_ORIG_NODE(res, node);
/**
 * Transform the TLS base Proj into an ia32 LdTls node (loads the
 * thread-local-storage base address) in the transformed block.
 */
822 ir_node *gen_Proj_tls(ir_node *node)
824 ir_node *block = get_new_node(get_nodes_block(node));
827 res = new_bd_ia32_LdTls(NULL, block, mode_Iu);
/**
 * Transform an Unknown node into a mode-appropriate ia32 placeholder:
 * xUnknown for SSE floats, vfldz (load zero) for x87 floats, Unknown for
 * gp-register modes; any other mode panics.  The node is created in the
 * start block.
 */
832 ir_node *gen_Unknown(ir_node *node)
834 ir_mode *mode = get_irn_mode(node);
835 ir_graph *irg = current_ir_graph;
836 dbg_info *dbgi = get_irn_dbg_info(node);
837 ir_node *block = get_irg_start_block(irg);
840 if (mode_is_float(mode)) {
841 if (ia32_cg_config.use_sse2) {
842 res = new_bd_ia32_xUnknown(dbgi, block);
844 res = new_bd_ia32_vfldz(dbgi, block);
846 } else if (ia32_mode_needs_gp_reg(mode)) {
847 res = new_bd_ia32_Unknown(dbgi, block);
849 panic("unsupported Unknown-Mode");
852 be_dep_on_frame(res);
/**
 * Build an arch_register_req_t from a parsed asm constraint.
 *
 * Three cases:
 *  - same_as >= 0: clone the referenced output requirement, add a
 *    should-be-same on @p pos, and swap it onto the output side (firm
 *    keeps matching constraints on outputs, gcc syntax on inputs)
 *  - cls == NULL: pure memory operand, return the shared no_register_req
 *  - otherwise: a normal or limited requirement for the constraint's class
 *
 * All requirements are allocated on the current irg's obstack.
 *
 * @param constraint  the parsed constraint
 * @param n_outs      number of output requirements in @p out_reqs
 * @param out_reqs    output requirement array (mutated in the same_as case)
 * @param pos         position of this operand (for should-be-same)
 */
856 const arch_register_req_t *make_register_req(const constraint_t *constraint,
857 int n_outs, const arch_register_req_t **out_reqs, int pos)
859 struct obstack *obst = get_irg_obstack(current_ir_graph);
860 int same_as = constraint->same_as;
861 arch_register_req_t *req;
864 const arch_register_req_t *other_constr;
866 if (same_as >= n_outs)
867 panic("invalid output number in same_as constraint");
869 other_constr = out_reqs[same_as];
871 req = obstack_alloc(obst, sizeof(req[0]));
872 *req = *other_constr;
873 req->type |= arch_register_req_type_should_be_same;
874 req->other_same = 1U << pos;
876 /* switch constraints. This is because in firm we have same_as
877 * constraints on the output constraints while in the gcc asm syntax
878 * they are specified on the input constraints */
879 out_reqs[same_as] = req;
883 /* pure memory ops */
884 if (constraint->cls == NULL) {
885 return &no_register_req;
888 if (constraint->allowed_registers != 0
889 && !constraint->all_registers_allowed) {
890 unsigned *limited_ptr;
/* limited requirement: the bitset is stored directly behind the req */
892 req = obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
893 memset(req, 0, sizeof(req[0]));
894 limited_ptr = (unsigned*) (req+1);
896 req->type = arch_register_req_type_limited;
897 *limited_ptr = constraint->allowed_registers;
898 req->limited = limited_ptr;
900 req = obstack_alloc(obst, sizeof(req[0]));
901 memset(req, 0, sizeof(req[0]));
902 req->type = arch_register_req_type_normal;
904 req->cls = constraint->cls;
/**
 * Build a limited register requirement for a single clobbered register
 * named in an asm clobber list.
 *
 * Panics if @p clobber does not name a known register.  The requirement
 * and its one-word limited bitset are allocated on the current irg's
 * obstack; the bitset restricts the requirement to exactly that register
 * (hence the index < 32 assertion).
 */
909 const arch_register_req_t *parse_clobber(const char *clobber)
911 struct obstack *obst = get_irg_obstack(current_ir_graph);
912 const arch_register_t *reg = ia32_get_clobber_register(clobber);
913 arch_register_req_t *req;
917 panic("Register '%s' mentioned in asm clobber is unknown", clobber);
/* the limited bitset is a single unsigned, so index must fit in 32 bits */
920 assert(reg->index < 32);
922 limited = obstack_alloc(obst, sizeof(limited[0]));
923 *limited = 1 << reg->index;
925 req = obstack_alloc(obst, sizeof(req[0]));
926 memset(req, 0, sizeof(req[0]));
927 req->type = arch_register_req_type_limited;
928 req->cls = arch_register_get_class(reg);
929 req->limited = limited;
/**
 * Check whether @p other prevents folding @p am_candidate into an address
 * mode of a node in @p block.
 *
 * Nodes in a different block cannot interfere.  For a Sync, every
 * predecessor inside the block is tested individually; a Proj of the
 * candidate itself never blocks it ("do not block ourselves from getting
 * eaten").  Otherwise the heights information decides whether @p other is
 * reachable from the candidate within the block.
 */
935 int prevents_AM(ir_node *const block, ir_node *const am_candidate,
936 ir_node *const other)
938 if (get_nodes_block(other) != block)
941 if (is_Sync(other)) {
944 for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) {
945 ir_node *const pred = get_Sync_pred(other, i);
947 if (get_nodes_block(pred) != block)
950 /* Do not block ourselves from getting eaten */
951 if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate)
954 if (!heights_reachable_in_block(heights, pred, am_candidate))
962 /* Do not block ourselves from getting eaten */
963 if (is_Proj(other) && get_Proj_pred(other) == am_candidate)
966 if (!heights_reachable_in_block(heights, other, am_candidate))
/**
 * Try to express @p node as an ia32 Immediate.
 *
 * Handles Const, Global (symconst) and Add(Const, Global) / Add(Global,
 * Const) shapes with int or reference mode.  The constant part must fit
 * the given immediate constraint (checked via check_immediate_constraint);
 * a symconst part requires the full 32-bit constraint (type 0).
 *
 * @param node                       the node to convert
 * @param immediate_constraint_type  constraint letter, or 0 for "any 32bit"
 * @return the Immediate node, or NULL when conversion is not possible
 *         (the failure returns appear to be cut off in this excerpt)
 */
973 ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type)
976 ir_entity *symconst_ent = NULL;
978 ir_node *cnst = NULL;
979 ir_node *symconst = NULL;
/* only integer and reference modes can be immediates */
982 mode = get_irn_mode(node);
983 if (!mode_is_int(mode) && !mode_is_reference(mode)) {
987 if (is_Const(node)) {
990 } else if (is_Global(node)) {
993 } else if (is_Add(node)) {
994 ir_node *left = get_Add_left(node);
995 ir_node *right = get_Add_right(node);
996 if (is_Const(left) && is_Global(right)) {
999 } else if (is_Global(left) && is_Const(right)) {
1008 tarval *offset = get_Const_tarval(cnst);
1009 if (!tarval_is_long(offset)) {
1010 ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst);
1014 val = get_tarval_long(offset);
1015 if (!check_immediate_constraint(val, immediate_constraint_type))
1018 if (symconst != NULL) {
1019 if (immediate_constraint_type != 0) {
1020 /* we need full 32bits for symconsts */
1024 symconst_ent = get_Global_entity(symconst);
/* neither a constant nor a symconst part: not an immediate */
1026 if (cnst == NULL && symconst == NULL)
1029 new_node = ia32_create_Immediate(symconst_ent, 0, val);