2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the common parts of IR transformation from
23 * firm into ia32-Firm.
24 * @author Matthias Braun, Sebastian Buchwald
25 * @version $Id: ia32_common_transform.c 21012 2008-08-06 13:35:17Z beck $
35 #include "../betranshlp.h"
39 #include "ia32_architecture.h"
40 #include "ia32_common_transform.h"
41 #include "ia32_new_nodes.h"
43 #include "gen_ia32_new_nodes.h"
44 #include "gen_ia32_regalloc_if.h"
/** hold the current code generator during transformation */
ia32_code_gen_t *env_cg = NULL;

/* heights analysis of the current graph; used by prevents_AM() to decide
 * intra-block reachability when folding address-mode operands */
heights_t *heights = NULL;
/* Requirement for operands that occupy no register at all (pure memory
 * operands); make_register_req() returns this when no register class was
 * derived from the constraint. */
static const arch_register_req_t no_register_req = {
	arch_register_req_type_none,
	NULL, /* limit bitset */
/**
 * Check whether a constant value satisfies a GCC x86 immediate constraint
 * letter.
 *
 * @param val                       the constant value to test
 * @param immediate_constraint_type the constraint letter; 0 and 'i' accept
 *                                  any integer
 * @return 1 if @p val may be used as an immediate for the constraint,
 *         0 otherwise
 */
static int check_immediate_constraint(long val, char immediate_constraint_type)
{
	switch (immediate_constraint_type) {
	case 0:   /* no specific constraint: any immediate is fine */
	case 'i': /* any immediate integer operand */
		return 1;

	case 'I': return    0 <= val && val <=  31; /* 5bit shift counts */
	case 'J': return    0 <= val && val <=  63; /* 6bit shift counts */
	case 'K': return -128 <= val && val <= 127; /* signed 8bit */
	case 'L': return val == 0xff || val == 0xffff; /* zero-extension masks */
	case 'M': return    0 <= val && val <=   3; /* lea scale shifts */
	case 'N': return    0 <= val && val <= 255; /* unsigned 8bit (in/out) */
	case 'O': return    0 <= val && val <= 127; /* 7bit positive */

	default: panic("Invalid immediate constraint found");
	}
	/* not reached (panic does not return) */
	return 0;
}
77 /* creates a unique ident by adding a number to a tag */
78 ident *ia32_unique_id(const char *tag)
80 static unsigned id = 0;
83 snprintf(str, sizeof(str), tag, ++id);
84 return new_id_from_str(str);
88 * Get a primitive type for a mode with alignment 16.
90 static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode)
92 pmap_entry *e = pmap_find(types, mode);
96 res = new_type_primitive(mode);
97 if (get_mode_size_bits(mode) >= 80) {
98 set_type_alignment_bytes(res, 16);
100 pmap_insert(types, mode, res);
/**
 * Return a global, constant entity holding the value of the float Const node
 * @p cnst. Entities are cached in isa->tv_ent keyed by the tarval, so equal
 * constants share one entity.
 */
ir_entity *create_float_const_entity(ir_node *cnst)
	ia32_isa_t *isa = env_cg->isa;
	tarval *key = get_Const_tarval(cnst);
	pmap_entry *e = pmap_find(isa->tv_ent, key);   /* cache lookup */
	ir_mode *mode = get_tarval_mode(tv);

	if (! ia32_cg_config.use_sse2) {
		/* try to reduce the mode to produce smaller sized entities */
		if (mode != mode_F) {
			if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
				/* NOTE(review): mode is presumably reassigned to mode_F on a
				 * line not visible here before this conversion — confirm */
				tv = tarval_convert_to(tv, mode);
			} else if (mode != mode_D) {
				if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
					/* same pattern for double precision */
					tv = tarval_convert_to(tv, mode);

	if (mode == get_irn_mode(cnst)) {
		/* mode was not changed */
		tp = get_Const_type(cnst);
		if (tp == firm_unknown_type)
			tp = ia32_get_prim_type(isa->types, mode);
		/* mode was reduced above: need a matching primitive type */
		tp = ia32_get_prim_type(isa->types, mode);

	/* local, constant entity with a generated ".LC<n>" label */
	res = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp);
	set_entity_ld_ident(res, get_entity_ident(res));
	set_entity_visibility(res, ir_visibility_local);
	add_entity_linkage(res, IR_LINKAGE_CONSTANT);

	/* we create a new entity here: It's initialization must resist on the
	rem = current_ir_graph;
	current_ir_graph = get_const_code_irg();
	set_atomic_ent_value(res, new_Const_type(tv, tp));
	current_ir_graph = rem;   /* restore the previous graph */

	pmap_insert(isa->tv_ent, key, res);   /* remember for later lookups */
164 ir_node *ia32_create_Immediate(ir_entity *symconst, int symconst_sign, long val)
166 ir_graph *irg = current_ir_graph;
167 ir_node *start_block = get_irg_start_block(irg);
168 ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
169 symconst_sign, no_pic_adjust, val);
170 arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
/**
 * Map a GCC asm clobber name (e.g. "eax", or the short form "ax") to the
 * backend register description. Returns NULL when nothing matches.
 */
const arch_register_t *ia32_get_clobber_register(const char *clobber)
	const arch_register_t *reg = NULL;
	const arch_register_class_t *cls;

	/* TODO: construct a hashmap instead of doing linear search for clobber
	for (c = 0; c < N_CLASSES; ++c) {
		cls = & ia32_reg_classes[c];
		for (r = 0; r < cls->n_regs; ++r) {
			const arch_register_t *temp_reg = arch_register_for_index(cls, r);
			/* for gp registers also accept the name without the leading
			 * size letter: "ax" matches "eax" (temp_reg->name+1) */
			if (strcmp(temp_reg->name, clobber) == 0
			    || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
201 int ia32_mode_needs_gp_reg(ir_mode *mode)
203 if (mode == mode_fpcw)
205 if (get_mode_size_bits(mode) > 32)
207 return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
/**
 * Parse one GCC inline-assembler constraint string into a constraint_t.
 * Derives the register class, an optional limited-register bitmask, an
 * immediate-type letter and the same-as/memory flags.
 *
 * @param constraint  output: the parsed result (cleared first)
 * @param c           the constraint string, e.g. "=a", "rm", "Ir"
 *                    (a third parameter follows — presumably an is_output
 *                    flag; TODO confirm, not visible in this view)
 */
static void parse_asm_constraints(constraint_t *constraint, const char *c,
	char immediate_type = '\0';
	unsigned limited = 0;   /* bitmask of individually allowed registers */
	const arch_register_class_t *cls = NULL;
	int memory_possible = 0;
	int all_registers_allowed = 0;

	memset(constraint, 0, sizeof(constraint[0]));
	constraint->same_as = -1;   /* -1: no matching ("0".."9") constraint */

	/* a memory constraint: no need to do anything in backend about it
	 * (the dependencies are already respected by the memory edge of
	/* TODO: improve error messages with node and source info. (As users can
	 * easily hit these) */
	/* Skip out/in-out marker */
	/* scan every character of the constraint up to the next alternative */
	while (*c != 0 && *c != ',')
	/* NOTE(review): the case labels of the big switch are elided in this
	 * view; the register masks below correspond to the usual GCC x86
	 * letters (a/b/c/d/D/S/q/A/Q...) — verify against the full file */
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EAX;
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EBX;
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_ECX;
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EDX;
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EDI;
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_ESI;
	/* q means lower part of the regs only, this makes no
	 * difference to Q for us (we only assign whole registers) */
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EAX | 1 << REG_EDX;
	assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
	           1 << REG_EDX | 1 << REG_ESI | 1 << REG_EDI |
	/* a general-register constraint: any gp register is acceptable */
	if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
		panic("multiple register classes not supported");
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	all_registers_allowed = 1;
	/* TODO: mark values so the x87 simulator knows about t and u */
	if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_vfp])
		panic("multiple register classes not supported");
	cls = &ia32_reg_classes[CLASS_ia32_vfp];
	all_registers_allowed = 1;
	/* SSE register constraint */
	if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_xmm])
		panic("multiple register classes not supproted"); /* NOTE(review): typo "supproted" in message */
	cls = &ia32_reg_classes[CLASS_ia32_xmm];
	all_registers_allowed = 1;
	/* immediate constraints: remember the letter for range checking */
	if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
		panic("multiple register classes not supported");
	if (immediate_type != '\0')
		panic("multiple immediate types not supported");
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
		panic("multiple register classes not supported");
	if (immediate_type != '\0')
		panic("multiple immediate types not supported");
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	immediate_type = 'i';
	if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
		panic("multiple register classes not supported");
	if (immediate_type != '\0')
		panic("multiple immediate types not supported");
	immediate_type = 'i';
	cls = &ia32_reg_classes[CLASS_ia32_gp];
	all_registers_allowed = 1;
	/* digit constraint: output must match another operand ("same as") */
	panic("can only specify same constraint on input");
	sscanf(c, "%d%n", &same_as, &p);
	/* memory constraint no need to do anything in backend about it
	 * (the dependencies are already respected by the memory edge of
	/* constraints the ia32 backend (or 32bit mode) cannot handle: */
	case 'E': /* no float consts yet */
	case 'F': /* no float consts yet */
	case 's': /* makes no sense on x86 */
	case '<': /* no autodecrement on x86 */
	case '>': /* no autoincrement on x86 */
	case 'C': /* sse constant not supported yet */
	case 'G': /* 80387 constant not supported yet */
	case 'y': /* we don't support mmx registers yet */
	case 'Z': /* not available in 32 bit mode */
	case 'e': /* not available in 32 bit mode */
	panic("unsupported asm constraint '%c' found in (%+F)",
	      *c, current_ir_graph);
	panic("unknown asm constraint '%c' found in (%+F)", *c,
	/* same-as excludes explicit register/immediate constraints */
	panic("same as and register constraint not supported");
	if (immediate_type != '\0')
		panic("same as and immediate constraint not supported");
	if (cls == NULL && same_as < 0) {
		if (!memory_possible)
			panic("no constraint specified for assembler input");
	/* publish the parsed result */
	constraint->same_as = same_as;
	constraint->cls = cls;
	constraint->allowed_registers = limited;
	constraint->all_registers_allowed = all_registers_allowed;
	constraint->memory_possible = memory_possible;
	constraint->immediate_type = immediate_type;
439 static bool can_match(const arch_register_req_t *in,
440 const arch_register_req_t *out)
442 if (in->cls != out->cls)
444 if ( (in->type & arch_register_req_type_limited) == 0
445 || (out->type & arch_register_req_type_limited) == 0 )
448 return (*in->limited & *out->limited) != 0;
/* Get the transformed (backend) copy of @p node. Both paths visible here
 * forward to be_transform_node(); the alternative-transformer branch is
 * elided in this view — presumably an #ifdef for the graph-rewriting
 * transformer, confirm against the full file. */
static inline ir_node *get_new_node(ir_node *node)
	if (be_transformer == TRANSFORMER_DEFAULT) {
		return be_transform_node(node);
	return be_transform_node(node);
/**
 * Transform a firm ASM node into an ia32 Asm node: parses all input/output
 * constraints and clobbers into register requirements, builds the
 * register_map describing operand positions, and pads inputs/outputs with
 * dummy operands to keep the node register-pressure faithful.
 */
ir_node *gen_ASM(ir_node *node)
	ir_node *block = get_nodes_block(node);
	ir_node *new_block = get_new_node(block);
	dbg_info *dbgi = get_irn_dbg_info(node);
	int n_out_constraints;
	const arch_register_req_t **out_reg_reqs;
	const arch_register_req_t **in_reg_reqs;
	ia32_asm_reg_t *register_map;
	unsigned reg_map_size = 0;
	struct obstack *obst;
	const ir_asm_constraint *in_constraints;
	const ir_asm_constraint *out_constraints;
	int clobbers_flags = 0;
	unsigned clobber_bits[N_CLASSES];   /* clobbered regs per register class */
	backend_info_t *info;

	memset(&clobber_bits, 0, sizeof(clobber_bits));

	/* workaround for lots of buggy code out there as most people think volatile
	 * asm is enough for everything and forget the flags (linux kernel, etc.)
	if (get_irn_pinned(node) == op_pin_state_pinned) {

	arity = get_irn_arity(node);
	in = ALLOCANZ(ir_node*, arity);

	/* first pass over the clobbers: collect clobbered register bits,
	 * "memory" and "cc" are handled specially */
	clobbers = get_ASM_clobbers(node);
	for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
		const arch_register_req_t *req;
		const char *c = get_id_str(clobbers[i]);

		if (strcmp(c, "memory") == 0)
		if (strcmp(c, "cc") == 0) {

		req = parse_clobber(c);
		clobber_bits[req->cls->index] |= *req->limited;

	n_out_constraints = get_ASM_n_output_constraints(node);
	out_arity = n_out_constraints + n_clobbers;

	in_constraints = get_ASM_input_constraints(node);
	out_constraints = get_ASM_output_constraints(node);

	/* determine size of register_map */
	for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
		const ir_asm_constraint *constraint = &out_constraints[out_idx];
		if (constraint->pos > reg_map_size)
			reg_map_size = constraint->pos;
	for (i = 0; i < arity; ++i) {
		const ir_asm_constraint *constraint = &in_constraints[i];
		if (constraint->pos > reg_map_size)
			reg_map_size = constraint->pos;

	obst = get_irg_obstack(current_ir_graph);
	register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size);
	memset(register_map, 0, reg_map_size * sizeof(register_map[0]));

	/* construct output constraints */
	out_size = out_arity + 1;   /* +1 for the trailing memory output */
	out_reg_reqs = obstack_alloc(obst, out_size * sizeof(out_reg_reqs[0]));

	for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
		const ir_asm_constraint *constraint = &out_constraints[out_idx];
		const char *c = get_id_str(constraint->constraint);
		unsigned pos = constraint->pos;
		constraint_t parsed_constraint;
		const arch_register_req_t *req;

		parse_asm_constraints(&parsed_constraint, c, 1);
		req = make_register_req(&parsed_constraint, n_out_constraints,
		                        out_reg_reqs, out_idx);
		out_reg_reqs[out_idx] = req;

		register_map[pos].use_input = 0;
		register_map[pos].valid = 1;
		register_map[pos].memory = 0;
		register_map[pos].inout_pos = out_idx;
		register_map[pos].mode = constraint->mode;

	/* inputs + input constraints */
	in_reg_reqs = obstack_alloc(obst, arity * sizeof(in_reg_reqs[0]));
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);
		const ir_asm_constraint *constraint = &in_constraints[i];
		ident *constr_id = constraint->constraint;
		const char *c = get_id_str(constr_id);
		unsigned pos = constraint->pos;
		int is_memory_op = 0;
		ir_node *input = NULL;
		unsigned r_clobber_bits;
		constraint_t parsed_constraint;
		const arch_register_req_t *req;

		parse_asm_constraints(&parsed_constraint, c, 0);
		if (parsed_constraint.cls != NULL) {
			/* remove clobbered registers from the allowed set */
			r_clobber_bits = clobber_bits[parsed_constraint.cls->index];
			if (r_clobber_bits != 0) {
				if (parsed_constraint.all_registers_allowed) {
					parsed_constraint.all_registers_allowed = 0;
					be_abi_set_non_ignore_regs(env_cg->birg->abi,
					                           parsed_constraint.cls,
					                           &parsed_constraint.allowed_registers);
				parsed_constraint.allowed_registers &= ~r_clobber_bits;

		req = make_register_req(&parsed_constraint, n_out_constraints,
		in_reg_reqs[i] = req;

		/* immediate constraint: try to encode the operand directly */
		if (parsed_constraint.immediate_type != '\0') {
			char imm_type = parsed_constraint.immediate_type;
			input = try_create_Immediate(pred, imm_type);

			/* fall back to the transformed operand node */
			ir_node *pred = get_irn_n(node, i);
			input = get_new_node(pred);

			if (parsed_constraint.cls == NULL
			    && parsed_constraint.same_as < 0) {
			} else if (parsed_constraint.memory_possible) {
				/* TODO: match Load or Load/Store if memory possible is set */

		register_map[pos].use_input = 1;
		register_map[pos].valid = 1;
		register_map[pos].memory = is_memory_op;
		register_map[pos].inout_pos = i;
		register_map[pos].mode = constraint->mode;

	/* second pass over the clobbers: append them as extra outputs */
	for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
		const char *c = get_id_str(clobbers[i]);
		const arch_register_req_t *req;

		if (strcmp(c, "memory") == 0 || strcmp(c, "cc") == 0)

		req = parse_clobber(c);
		out_reg_reqs[out_idx] = req;

	/* Attempt to make ASM node register pressure faithful.
	 * (This does not work for complicated cases yet!)
	 * Algorithm: Check if there are fewer inputs or outputs (I will call this
	 * the smaller list). Then try to match each constraint of the smaller list
	 * to 1 of the other list. If we can't match it, then we have to add a dummy
	 * input/output to the other list
	 * FIXME: This is still broken in lots of cases. But at least better than
	 * FIXME: need to do this per register class...
	if (out_arity <= arity) {
		int orig_arity = arity;
		bitset_t *used_ins = bitset_alloca(arity);
		for (o = 0; o < out_arity; ++o) {
			const arch_register_req_t *outreq = out_reg_reqs[o];

			if (outreq->cls == NULL) {

			for (i = 0; i < orig_arity; ++i) {
				const arch_register_req_t *inreq;
				if (bitset_is_set(used_ins, i))
				inreq = in_reg_reqs[i];
				if (!can_match(outreq, inreq))
				bitset_set(used_ins, i);
			/* did we find any match? */

			/* we might need more space in the input arrays */
			if (arity >= in_size) {
				const arch_register_req_t **new_in_reg_reqs;
					= obstack_alloc(obst, in_size*sizeof(in_reg_reqs[0]));
				memcpy(new_in_reg_reqs, in_reg_reqs, arity * sizeof(new_in_reg_reqs[0]));
				new_in = ALLOCANZ(ir_node*, in_size);
				memcpy(new_in, in, arity*sizeof(new_in[0]));
				in_reg_reqs = new_in_reg_reqs;

			/* add a new (dummy) input which occupies the register */
			assert(outreq->type & arch_register_req_type_limited);
			in_reg_reqs[arity] = outreq;
			in[arity] = new_bd_ia32_ProduceVal(NULL, block);
			be_dep_on_frame(in[arity]);

		/* symmetric case: more outputs than inputs */
		bitset_t *used_outs = bitset_alloca(out_arity);
		int orig_out_arity = out_arity;
		for (i = 0; i < arity; ++i) {
			const arch_register_req_t *inreq = in_reg_reqs[i];

			if (inreq->cls == NULL) {

			for (o = 0; o < orig_out_arity; ++o) {
				const arch_register_req_t *outreq;
				if (bitset_is_set(used_outs, o))
				outreq = out_reg_reqs[o];
				if (!can_match(outreq, inreq))
				/* NOTE(review): this marks index i in the *outputs* bitset —
				 * looks like it should be bitset_set(used_outs, o); verify
				 * against the surrounding (elided) loop body */
				bitset_set(used_outs, i);
			/* did we find any match? */
			if (o < orig_out_arity)

			/* we might need more space in the output arrays */
			if (out_arity >= out_size) {
				const arch_register_req_t **new_out_reg_reqs;
					= obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
				memcpy(new_out_reg_reqs, out_reg_reqs,
				       out_arity * sizeof(new_out_reg_reqs[0]));
				out_reg_reqs = new_out_reg_reqs;

			/* add a new (dummy) output which occupies the register */
			assert(inreq->type & arch_register_req_type_limited);
			out_reg_reqs[out_arity] = inreq;

	/* append none register requirement for the memory output */
	if (out_arity + 1 >= out_size) {
		const arch_register_req_t **new_out_reg_reqs;
		out_size = out_arity + 1;
			= obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0]));
		memcpy(new_out_reg_reqs, out_reg_reqs,
		       out_arity * sizeof(new_out_reg_reqs[0]));
		out_reg_reqs = new_out_reg_reqs;

	/* add a new (dummy) output which occupies the register */
	out_reg_reqs[out_arity] = arch_no_register_req;

	/* finally build the backend Asm node and attach the requirements */
	new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
	                           get_ASM_text(node), register_map);
	be_dep_on_frame(new_node);

	info = be_get_info(new_node);
	for (i = 0; i < out_arity; ++i) {
		info->out_infos[i].req = out_reg_reqs[i];
	set_ia32_in_req_all(new_node, in_reg_reqs);

	SET_IA32_ORIG_NODE(new_node, node);
/**
 * Transform a CopyB node: large copies become a REP MOVS-style ia32 CopyB
 * with the count in a register, small copies become CopyB_i with an
 * immediate size.
 */
ir_node *gen_CopyB(ir_node *node)
	ir_node *block = get_new_node(get_nodes_block(node));
	ir_node *src = get_CopyB_src(node);
	ir_node *new_src = get_new_node(src);
	ir_node *dst = get_CopyB_dst(node);
	ir_node *new_dst = get_new_node(dst);
	ir_node *mem = get_CopyB_mem(node);
	ir_node *new_mem = get_new_node(mem);
	dbg_info *dbgi = get_irn_dbg_info(node);
	int size = get_type_size_bytes(get_CopyB_type(node));

	/* If we have to copy more than 32 bytes, we use REP MOVSx and */
	/* then we need the size explicitly in ECX. */
	/* NOTE(review): the comment says 32 bytes but the threshold below is
	 * 32 * 4 = 128 bytes — confirm which is intended */
	if (size >= 32 * 4) {
		rem = size & 0x3; /* size % 4 */
		/* the (word) count is materialized as a Const for ECX */
		res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size);
		be_dep_on_frame(res);

		res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
		ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
		/* small copy: size encoded as immediate */
		res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);

	SET_IA32_ORIG_NODE(res, node);
812 ir_node *gen_Proj_tls(ir_node *node)
814 ir_node *block = get_new_node(get_nodes_block(node));
817 res = new_bd_ia32_LdTls(NULL, block, mode_Iu);
822 ir_node *gen_Unknown(ir_node *node)
824 ir_mode *mode = get_irn_mode(node);
825 ir_graph *irg = current_ir_graph;
826 dbg_info *dbgi = get_irn_dbg_info(node);
827 ir_node *block = get_irg_start_block(irg);
830 if (mode_is_float(mode)) {
831 if (ia32_cg_config.use_sse2) {
832 res = new_bd_ia32_xUnknown(dbgi, block);
834 res = new_bd_ia32_vfldz(dbgi, block);
836 } else if (ia32_mode_needs_gp_reg(mode)) {
837 res = new_bd_ia32_Unknown(dbgi, block);
839 panic("unsupported Unknown-Mode");
842 be_dep_on_frame(res);
/**
 * Build a register requirement from a parsed asm constraint.
 * same_as constraints are moved onto the referenced *output* requirement
 * (gcc specifies them on inputs, firm on outputs); pure memory operands
 * yield no_register_req; a limited register set becomes a limited
 * requirement with the bitmask stored directly behind the struct.
 *
 * @param constraint  the parsed constraint
 * @param n_outs      number of output requirements in @p out_reqs
 * @param out_reqs    output requirement array (may be modified for same_as)
 * @param pos         position of this operand (for other_same bookkeeping)
 */
const arch_register_req_t *make_register_req(const constraint_t *constraint,
	int n_outs, const arch_register_req_t **out_reqs, int pos)
	struct obstack *obst = get_irg_obstack(current_ir_graph);
	int same_as = constraint->same_as;
	arch_register_req_t *req;

	const arch_register_req_t *other_constr;

	if (same_as >= n_outs)
		panic("invalid output number in same_as constraint");

	other_constr = out_reqs[same_as];

	/* copy the referenced output requirement and tag it should-be-same */
	req = obstack_alloc(obst, sizeof(req[0]));
	*req = *other_constr;
	req->type |= arch_register_req_type_should_be_same;
	req->other_same = 1U << pos;

	/* switch constraints. This is because in firm we have same_as
	 * constraints on the output constraints while in the gcc asm syntax
	 * they are specified on the input constraints */
	out_reqs[same_as] = req;

	/* pure memory ops */
	if (constraint->cls == NULL) {
		return &no_register_req;

	if (constraint->allowed_registers != 0
	    && !constraint->all_registers_allowed) {
		unsigned *limited_ptr;
		/* the limited bitmask is allocated directly behind the req struct */
		req = obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
		memset(req, 0, sizeof(req[0]));
		limited_ptr = (unsigned*) (req+1);

		req->type = arch_register_req_type_limited;
		*limited_ptr = constraint->allowed_registers;
		req->limited = limited_ptr;
		/* any register of the class is fine */
		req = obstack_alloc(obst, sizeof(req[0]));
		memset(req, 0, sizeof(req[0]));
		req->type = arch_register_req_type_normal;

	req->cls = constraint->cls;
899 const arch_register_req_t *parse_clobber(const char *clobber)
901 struct obstack *obst = get_irg_obstack(current_ir_graph);
902 const arch_register_t *reg = ia32_get_clobber_register(clobber);
903 arch_register_req_t *req;
907 panic("Register '%s' mentioned in asm clobber is unknown", clobber);
910 assert(reg->index < 32);
912 limited = obstack_alloc(obst, sizeof(limited[0]));
913 *limited = 1 << reg->index;
915 req = obstack_alloc(obst, sizeof(req[0]));
916 memset(req, 0, sizeof(req[0]));
917 req->type = arch_register_req_type_limited;
918 req->cls = arch_register_get_class(reg);
919 req->limited = limited;
/**
 * Check whether @p other prevents folding @p am_candidate into an
 * address-mode operand inside @p block. Nodes in other blocks are harmless;
 * Sync predecessors are examined individually; Projs of the candidate
 * itself never block it; otherwise intra-block reachability (heights
 * analysis) decides.
 */
int prevents_AM(ir_node *const block, ir_node *const am_candidate,
	ir_node *const other)
	/* nodes outside the block cannot interfere */
	if (get_nodes_block(other) != block)

	if (is_Sync(other)) {
		/* check each Sync predecessor separately */
		for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) {
			ir_node *const pred = get_Sync_pred(other, i);

			if (get_nodes_block(pred) != block)

			/* Do not block ourselves from getting eaten */
			if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate)

			if (!heights_reachable_in_block(heights, pred, am_candidate))

	/* Do not block ourselves from getting eaten */
	if (is_Proj(other) && get_Proj_pred(other) == am_candidate)

	if (!heights_reachable_in_block(heights, other, am_candidate))
963 ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type)
966 ir_entity *symconst_ent = NULL;
968 ir_node *cnst = NULL;
969 ir_node *symconst = NULL;
972 mode = get_irn_mode(node);
973 if (!mode_is_int(mode) && !mode_is_reference(mode)) {
977 if (is_Const(node)) {
980 } else if (is_Global(node)) {
983 } else if (is_Add(node)) {
984 ir_node *left = get_Add_left(node);
985 ir_node *right = get_Add_right(node);
986 if (is_Const(left) && is_Global(right)) {
989 } else if (is_Global(left) && is_Const(right)) {
998 tarval *offset = get_Const_tarval(cnst);
999 if (!tarval_is_long(offset)) {
1000 ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst);
1004 val = get_tarval_long(offset);
1005 if (!check_immediate_constraint(val, immediate_constraint_type))
1008 if (symconst != NULL) {
1009 if (immediate_constraint_type != 0) {
1010 /* we need full 32bits for symconsts */
1014 symconst_ent = get_Global_entity(symconst);
1016 if (cnst == NULL && symconst == NULL)
1019 new_node = ia32_create_Immediate(symconst_ent, 0, val);