2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief This file implements the common parts of IR transformation from
10 * @author Matthias Braun, Sebastian Buchwald
21 #include "betranshlp.h"
24 #include "ia32_architecture.h"
25 #include "ia32_common_transform.h"
26 #include "ia32_new_nodes.h"
28 #include "gen_ia32_new_nodes.h"
29 #include "gen_ia32_regalloc_if.h"
/* Height (reachability) information; used below by ia32_prevents_AM() to
 * decide whether one node can reach another inside the same block. */
31 ir_heights_t *ia32_heights = NULL;
/**
 * Check whether the integer @p val is admissible for the given GCC
 * inline-assembler immediate constraint letter (x86 machine constraints).
 * Returns non-zero when the value fits the letter's allowed range.
 * NOTE(review): only the range-restricted letters are visible in this
 * excerpt; the unconditionally-true cases (presumably 'i'/'n') are in
 * elided lines -- confirm against the full file.
 */
33 static int check_immediate_constraint(long val, char immediate_constraint_type)
35 switch (immediate_constraint_type) {
/* 'I': 5-bit shift count; 'J': 6-bit (64bit shifts) */
38 case 'I': return 0 <= val && val <= 31;
39 case 'J': return 0 <= val && val <= 63;
/* 'K': signed 8-bit immediate */
40 case 'K': return -128 <= val && val <= 127;
/* 'L': the two zero-extension masks 0xff / 0xffff */
41 case 'L': return val == 0xff || val == 0xffff;
42 case 'M': return 0 <= val && val <= 3;
/* 'N': unsigned 8-bit (in/out port), 'O': 0..127 */
43 case 'N': return 0 <= val && val <= 255;
44 case 'O': return 0 <= val && val <= 127;
46 default: panic("Invalid immediate constraint found");
/**
 * Return a primitive ir_type suitable for values of @p mode.
 * The 80-bit x87 mode (ia32_mode_E) is special-cased (its handling is in
 * lines elided from this excerpt); every other mode maps directly via
 * get_type_for_mode().
 */
50 ir_type *ia32_get_prim_type(const ir_mode *mode)
52 if (mode == ia32_mode_E) {
55 return get_type_for_mode(mode);
/**
 * Return a global read-only entity holding the float constant @p tv,
 * creating it on first use. Entities are cached in isa->tv_ent keyed by
 * the tarval, so equal constants share a single entity.
 */
59 ir_entity *ia32_create_float_const_entity(ia32_isa_t *isa, ir_tarval *tv,
62 ir_entity *res = pmap_get(ir_entity, isa->tv_ent, tv);
63 ir_initializer_t *initializer;
70 mode = get_tarval_mode(tv);
/* x87 (non-SSE2) path: narrow the constant to a smaller IEEE mode when the
 * conversion is lossless, to keep the emitted data entity small. */
72 if (! ia32_cg_config.use_sse2) {
73 /* try to reduce the mode to produce smaller sized entities */
75 if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
77 tv = tarval_convert_to(tv, mode);
78 } else if (mode != mode_D) {
79 if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
81 tv = tarval_convert_to(tv, mode);
/* Create a fresh private, constant entity in the global type and
 * initialize it with the (possibly narrowed) tarval. */
88 name = id_unique("C%u");
90 tp = ia32_get_prim_type(mode);
91 res = new_entity(get_glob_type(), name, tp);
92 set_entity_ld_ident(res, get_entity_ident(res));
93 set_entity_visibility(res, ir_visibility_private);
94 add_entity_linkage(res, IR_LINKAGE_CONSTANT);
96 initializer = create_initializer_tarval(tv);
97 set_entity_initializer(res, initializer);
/* remember for reuse */
99 pmap_insert(isa->tv_ent, tv, res);
/**
 * Create an ia32 Immediate node in the start block of @p irg: an optional
 * entity reference @p symconst (with sign flag @p symconst_sign) plus the
 * integer offset @p val. The node gets the virtual GP_NOREG register since
 * an immediate occupies no real register.
 */
103 ir_node *ia32_create_Immediate(ir_graph *const irg, ir_entity *const symconst, int const symconst_sign, long const val)
105 ir_node *start_block = get_irg_start_block(irg);
106 ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
107 symconst_sign, ia32_no_pic_adjust, val);
108 arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);
/**
 * Map an inline-assembly clobber name to the backend register it names.
 * Linear search over all register classes; for the gp class a name without
 * the leading character also matches (temp_reg->name+1, e.g. "ax" for
 * "eax").
 */
113 const arch_register_t *ia32_get_clobber_register(const char *clobber)
115 const arch_register_t *reg = NULL;
118 const arch_register_class_t *cls;
120 /* TODO: construct a hashmap instead of doing linear search for clobber
122 for (c = 0; c < N_IA32_CLASSES; ++c) {
123 cls = & ia32_reg_classes[c];
124 for (r = 0; r < cls->n_regs; ++r) {
125 const arch_register_t *temp_reg = arch_register_for_index(cls, r);
126 if (strcmp(temp_reg->name, clobber) == 0
127 || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
/**
 * Return whether values of @p mode live in general purpose registers:
 * integer, reference or mode_b values of at most 32 bits, excluding the
 * fpu control word mode.
 */
139 int ia32_mode_needs_gp_reg(ir_mode *mode)
141 if (mode == ia32_mode_fpcw)
143 if (get_mode_size_bits(mode) > 32)
145 return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
/**
 * Parse one GCC inline-assembly constraint string @p c into @p constraint:
 * determines register class, allowed-register bitmask, immediate type,
 * memory-operand possibility and matching ("same as") constraints.
 * NOTE(review): many switch-case labels and loop bodies are elided in this
 * excerpt; the grouping comments below follow the visible register sets.
 */
148 static void parse_asm_constraints(constraint_t *constraint, const char *c,
151 char immediate_type = '\0';
152 unsigned limited = 0;
153 const arch_register_class_t *cls = NULL;
154 int memory_possible = 0;
155 int all_registers_allowed = 0;
/* Start from a clean constraint; same_as == -1 means "no matching
 * constraint". */
159 memset(constraint, 0, sizeof(constraint[0]));
160 constraint->same_as = -1;
163 /* a memory constraint: no need to do anything in backend about it
164 * (the dependencies are already respected by the memory edge of
169 /* TODO: improve error messages with node and source info. (As users can
170 * easily hit these) */
178 /* Skip out/in-out marker */
188 while (*c != 0 && *c != ',')
/* Single-register gp constraints: each pins one register (eax, ebx, ecx,
 * edx, edi, esi) and asserts no conflicting class was chosen before. */
193 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
194 cls = &ia32_reg_classes[CLASS_ia32_gp];
195 limited |= 1 << REG_GP_EAX;
198 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
199 cls = &ia32_reg_classes[CLASS_ia32_gp];
200 limited |= 1 << REG_GP_EBX;
203 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
204 cls = &ia32_reg_classes[CLASS_ia32_gp];
205 limited |= 1 << REG_GP_ECX;
208 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
209 cls = &ia32_reg_classes[CLASS_ia32_gp];
210 limited |= 1 << REG_GP_EDX;
213 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
214 cls = &ia32_reg_classes[CLASS_ia32_gp];
215 limited |= 1 << REG_GP_EDI;
218 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
219 cls = &ia32_reg_classes[CLASS_ia32_gp];
220 limited |= 1 << REG_GP_ESI;
/* 'q'/'Q'-style constraints: any of the byte-addressable gp registers. */
224 /* q means lower part of the regs only, this makes no
225 * difference to Q for us (we only assign whole registers) */
226 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
227 cls = &ia32_reg_classes[CLASS_ia32_gp];
228 limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
/* eax/edx pair (presumably the 'A' constraint -- confirm in full file) */
232 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
233 cls = &ia32_reg_classes[CLASS_ia32_gp];
234 limited |= 1 << REG_GP_EAX | 1 << REG_GP_EDX;
/* general-register set spelled out explicitly */
237 assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
238 cls = &ia32_reg_classes[CLASS_ia32_gp];
239 limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
240 1 << REG_GP_EDX | 1 << REG_GP_ESI | 1 << REG_GP_EDI |
/* any gp register allowed */
247 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
248 panic("multiple register classes not supported");
249 cls = &ia32_reg_classes[CLASS_ia32_gp];
250 all_registers_allowed = 1;
/* any x87 fp register allowed */
256 /* TODO: mark values so the x87 simulator knows about t and u */
257 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_fp])
258 panic("multiple register classes not supported");
259 cls = &ia32_reg_classes[CLASS_ia32_fp];
260 all_registers_allowed = 1;
/* any xmm register allowed */
265 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_xmm])
/* NOTE(review): typo "supproted" in this panic message (should be
 * "supported") -- cannot be fixed here without touching runtime text in a
 * doc-only pass; flag for a follow-up. */
266 panic("multiple register classes not supproted");
267 cls = &ia32_reg_classes[CLASS_ia32_xmm];
268 all_registers_allowed = 1;
/* immediate-style constraints: require gp class and a single immediate
 * type; the specific letters are in elided case labels. */
278 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
279 panic("multiple register classes not supported");
280 if (immediate_type != '\0')
281 panic("multiple immediate types not supported");
282 cls = &ia32_reg_classes[CLASS_ia32_gp];
287 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
288 panic("multiple register classes not supported");
289 if (immediate_type != '\0')
290 panic("multiple immediate types not supported");
291 cls = &ia32_reg_classes[CLASS_ia32_gp];
292 immediate_type = 'i';
/* 'g'-like constraint: register or immediate */
297 if (cls != NULL && cls != &ia32_reg_classes[CLASS_ia32_gp])
298 panic("multiple register classes not supported");
299 if (immediate_type != '\0')
300 panic("multiple immediate types not supported");
301 immediate_type = 'i';
302 cls = &ia32_reg_classes[CLASS_ia32_gp];
303 all_registers_allowed = 1;
/* digit constraint: "same as output n"; only valid on inputs */
318 panic("can only specify same constraint on input");
320 sscanf(c, "%d%n", &same_as, &p);
330 /* memory constraint no need to do anything in backend about it
331 * (the dependencies are already respected by the memory edge of
/* constraints the ia32 backend deliberately rejects */
336 case 'E': /* no float consts yet */
337 case 'F': /* no float consts yet */
338 case 's': /* makes no sense on x86 */
339 case '<': /* no autodecrement on x86 */
340 case '>': /* no autoincrement on x86 */
341 case 'C': /* sse constant not supported yet */
342 case 'G': /* 80387 constant not supported yet */
343 case 'y': /* we don't support mmx registers yet */
344 case 'Z': /* not available in 32 bit mode */
345 case 'e': /* not available in 32 bit mode */
346 panic("unsupported asm constraint '%c' found in (%+F)",
347 *c, current_ir_graph);
349 panic("unknown asm constraint '%c' found in (%+F)", *c,
/* same_as is mutually exclusive with register/immediate constraints */
357 panic("same as and register constraint not supported");
358 if (immediate_type != '\0')
359 panic("same as and immediate constraint not supported");
362 if (cls == NULL && same_as < 0) {
363 if (!memory_possible)
364 panic("no constraint specified for assembler input");
/* publish the parsed result */
367 constraint->same_as = same_as;
368 constraint->cls = cls;
369 constraint->allowed_registers = limited;
370 constraint->all_registers_allowed = all_registers_allowed;
371 constraint->memory_possible = memory_possible;
372 constraint->immediate_type = immediate_type;
/**
 * Check whether an input and an output register requirement could be
 * satisfied by one common register: the classes must match and, when both
 * requirements are 'limited', their allowed-register bitsets must overlap.
 */
375 static bool can_match(const arch_register_req_t *in,
376 const arch_register_req_t *out)
378 if (in->cls != out->cls)
380 if (!arch_register_req_is(in, limited) ||
381 !arch_register_req_is(out, limited))
384 return (*in->limited & *out->limited) != 0;
/**
 * Fetch the transformed counterpart of @p node. Both visible paths call
 * be_transform_node(); the branch on be_transformer suggests a second
 * transformer variant handled in elided lines -- confirm in the full file.
 */
387 static inline ir_node *get_new_node(ir_node *node)
390 if (be_transformer == TRANSFORMER_DEFAULT) {
391 return be_transform_node(node);
396 return be_transform_node(node);
/* forward declaration; definition follows after ia32_gen_ASM */
400 static arch_register_req_t const *ia32_make_register_req(ir_graph *irg, constraint_t const *constraint, int n_outs, arch_register_req_t const **out_reqs, int pos);
/**
 * Transform a firm ASM node into an ia32_Asm backend node:
 *  1. collect clobbered registers per register class,
 *  2. parse every output and input constraint into register requirements,
 *  3. build the operand register map used by the emitter,
 *  4. add dummy inputs/outputs to make the node register-pressure faithful,
 *  5. construct the new node and attach the in/out requirements.
 */
402 ir_node *ia32_gen_ASM(ir_node *node)
404 ir_node *block = get_nodes_block(node);
405 ir_node *new_block = get_new_node(block);
406 dbg_info *dbgi = get_irn_dbg_info(node);
407 int n_inputs = get_ASM_n_inputs(node);
408 int n_ins = n_inputs+1;
409 ir_node **in = ALLOCANZ(ir_node*, n_ins);
410 size_t n_clobbers = 0;
411 ident **clobbers = get_ASM_clobbers(node);
412 unsigned reg_map_size = 0;
413 ir_graph *irg = get_irn_irg(node);
414 struct obstack *obst = get_irg_obstack(irg);
/* one bitmask of clobbered registers per register class */
415 unsigned clobber_bits[N_IA32_CLASSES];
416 memset(&clobber_bits, 0, sizeof(clobber_bits));
/* pass 1 over clobbers: accumulate clobbered-register bitmasks */
418 for (size_t c = 0; c < get_ASM_n_clobbers(node); ++c) {
419 const char *clobber = get_id_str(clobbers[c]);
420 const arch_register_req_t *req = ia32_parse_clobber(clobber);
424 clobber_bits[req->cls->index] |= *req->limited;
425 assert(req->cls->n_regs <= sizeof(unsigned)*8);
428 size_t n_out_constraints = get_ASM_n_output_constraints(node);
429 size_t out_arity = n_out_constraints + n_clobbers;
431 const ir_asm_constraint *in_constraints = get_ASM_input_constraints(node);
432 const ir_asm_constraint *out_constraints = get_ASM_output_constraints(node);
434 /* determine size of register_map */
435 for (size_t out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
436 const ir_asm_constraint *constraint = &out_constraints[out_idx];
437 if (constraint->pos+1 > reg_map_size)
438 reg_map_size = constraint->pos+1;
440 for (int i = 0; i < n_inputs; ++i) {
441 const ir_asm_constraint *constraint = &in_constraints[i];
442 if (constraint->pos+1 > reg_map_size)
443 reg_map_size = constraint->pos+1;
446 ia32_asm_reg_t *const register_map = NEW_ARR_DZ(ia32_asm_reg_t, obst, reg_map_size);
448 /* construct output constraints */
449 size_t out_size = out_arity + 1;
450 const arch_register_req_t **out_reg_reqs
451 = OALLOCN(obst, const arch_register_req_t*, out_size);
454 for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
455 constraint_t parsed_constraint;
456 const ir_asm_constraint *constraint = &out_constraints[out_idx];
457 const char *c = get_id_str(constraint->constraint);
458 unsigned pos = constraint->pos;
459 parse_asm_constraints(&parsed_constraint, c, true);
460 arch_register_req_t const *const req = ia32_make_register_req(irg, &parsed_constraint, n_out_constraints, out_reg_reqs, out_idx);
461 out_reg_reqs[out_idx] = req;
463 /* multiple constraints for same pos. This can happen for example when
464 * a =A constraint gets lowered to two constraints: =a and =d for the
466 if (register_map[pos].valid)
/* record where the emitter finds operand 'pos' */
469 register_map[pos].use_input = 0;
470 register_map[pos].valid = 1;
471 register_map[pos].memory = 0;
472 register_map[pos].inout_pos = out_idx;
473 register_map[pos].mode = constraint->mode;
476 /* inputs + input constraints */
477 const arch_register_req_t **in_reg_reqs
478 = OALLOCN(obst, const arch_register_req_t*, n_ins);
479 for (int i = 0; i < n_inputs; ++i) {
480 constraint_t parsed_constraint;
481 ir_node *pred = get_ASM_input(node, i);
482 const ir_asm_constraint *constraint = &in_constraints[i];
483 ident *constr_id = constraint->constraint;
484 const char *c = get_id_str(constr_id);
485 unsigned pos = constraint->pos;
486 int is_memory_op = 0;
487 ir_node *input = NULL;
489 parse_asm_constraints(&parsed_constraint, c, false);
/* inputs may not use registers the asm clobbers: restrict the allowed
 * set by the clobber bitmask collected above */
490 if (parsed_constraint.cls != NULL) {
491 unsigned r_clobber_bits
492 = clobber_bits[parsed_constraint.cls->index];
493 if (r_clobber_bits != 0) {
494 if (parsed_constraint.all_registers_allowed) {
495 parsed_constraint.all_registers_allowed = 0;
496 be_set_allocatable_regs(irg,
497 parsed_constraint.cls,
498 &parsed_constraint.allowed_registers);
500 parsed_constraint.allowed_registers &= ~r_clobber_bits;
504 arch_register_req_t const *const req = ia32_make_register_req(irg, &parsed_constraint, n_out_constraints, out_reg_reqs, i);
505 in_reg_reqs[i] = req;
/* fold constant operands into an Immediate when the constraint allows */
507 if (parsed_constraint.immediate_type != '\0') {
508 char imm_type = parsed_constraint.immediate_type;
509 input = ia32_try_create_Immediate(pred, imm_type);
513 input = get_new_node(pred);
515 if (parsed_constraint.cls == NULL
516 && parsed_constraint.same_as < 0) {
518 in_reg_reqs[i] = ia32_reg_classes[CLASS_ia32_gp].class_req;
519 } else if (parsed_constraint.memory_possible) {
520 /* TODO: match Load or Load/Store if memory possible is set */
525 register_map[pos].use_input = 1;
526 register_map[pos].valid = 1;
527 register_map[pos].memory = is_memory_op;
528 register_map[pos].inout_pos = i;
529 register_map[pos].mode = constraint->mode;
/* the last input is the memory dependency; it needs no register */
532 assert(n_inputs == n_ins-1);
533 ir_node *mem = get_ASM_mem(node);
534 in[n_inputs] = be_transform_node(mem);
535 in_reg_reqs[n_inputs] = arch_no_register_req;
/* pass 2 over clobbers: append them as additional outputs */
538 for (size_t c = 0; c < get_ASM_n_clobbers(node); ++c) {
539 const char *clobber = get_id_str(clobbers[c]);
540 const arch_register_req_t *req = ia32_parse_clobber(clobber);
543 out_reg_reqs[out_idx] = req;
547 /* Attempt to make ASM node register pressure faithful.
548 * (This does not work for complicated cases yet!)
550 * Algorithm: Check if there are fewer inputs or outputs (I will call this
551 * the smaller list). Then try to match each constraint of the smaller list
552 * to 1 of the other list. If we can't match it, then we have to add a dummy
553 * input/output to the other list
555 * FIXME: This is still broken in lots of cases. But at least better than
557 * FIXME: need to do this per register class...
559 if (out_arity <= (size_t)n_inputs) {
560 int orig_inputs = n_ins;
562 bitset_t *used_ins = bitset_alloca(n_ins);
563 for (size_t o = 0; o < out_arity; ++o) {
564 const arch_register_req_t *outreq = out_reg_reqs[o];
566 if (outreq->cls == NULL) {
571 for (i = 0; i < orig_inputs; ++i) {
572 if (bitset_is_set(used_ins, i))
574 const arch_register_req_t *inreq = in_reg_reqs[i];
575 if (!can_match(outreq, inreq))
577 bitset_set(used_ins, i);
580 /* did we find any match? */
584 /* we might need more space in the input arrays */
585 if (n_ins >= in_size) {
587 const arch_register_req_t **new_in_reg_reqs
588 = OALLOCN(obst, const arch_register_req_t*,
590 memcpy(new_in_reg_reqs, in_reg_reqs,
591 n_ins*sizeof(new_in_reg_reqs[0]));
592 ir_node **new_in = ALLOCANZ(ir_node*, in_size);
593 memcpy(new_in, in, n_ins*sizeof(new_in[0]));
595 in_reg_reqs = new_in_reg_reqs;
599 /* add a new (dummy) input which occupies the register */
600 assert(arch_register_req_is(outreq, limited));
601 in_reg_reqs[n_ins] = outreq;
602 in[n_ins] = new_bd_ia32_ProduceVal(NULL, block);
/* symmetric case: more outputs than inputs */
606 bitset_t *used_outs = bitset_alloca(out_arity);
607 size_t orig_out_arity = out_arity;
608 for (int i = 0; i < n_inputs; ++i) {
609 const arch_register_req_t *inreq = in_reg_reqs[i];
611 if (inreq->cls == NULL)
615 for (o = 0; o < orig_out_arity; ++o) {
616 const arch_register_req_t *outreq;
617 if (bitset_is_set(used_outs, o))
619 outreq = out_reg_reqs[o];
620 if (!can_match(outreq, inreq))
/* NOTE(review): this marks index 'i' in used_outs, while the loop
 * searches over 'o' -- looks like it should be bitset_set(used_outs, o).
 * Cannot be verified from this excerpt; compare with upstream libFirm. */
622 bitset_set(used_outs, i);
625 /* did we find any match? */
626 if (o < orig_out_arity)
629 /* we might need more space in the output arrays */
630 if (out_arity >= out_size) {
631 const arch_register_req_t **new_out_reg_reqs;
635 = OALLOCN(obst, const arch_register_req_t*, out_size);
636 memcpy(new_out_reg_reqs, out_reg_reqs,
637 out_arity * sizeof(new_out_reg_reqs[0]));
638 out_reg_reqs = new_out_reg_reqs;
641 /* add a new (dummy) output which occupies the register */
642 assert(arch_register_req_is(inreq, limited));
643 out_reg_reqs[out_arity] = inreq;
648 /* append none register requirement for the memory output */
649 if (out_arity + 1 >= out_size) {
650 const arch_register_req_t **new_out_reg_reqs;
652 out_size = out_arity + 1;
654 = OALLOCN(obst, const arch_register_req_t*, out_size);
655 memcpy(new_out_reg_reqs, out_reg_reqs,
656 out_arity * sizeof(new_out_reg_reqs[0]));
657 out_reg_reqs = new_out_reg_reqs;
660 /* add a new (dummy) output which occupies the register */
661 out_reg_reqs[out_arity] = arch_no_register_req;
/* build the backend node and attach the computed requirements */
664 ir_node *new_node = new_bd_ia32_Asm(dbgi, new_block, n_ins, in, out_arity,
665 get_ASM_text(node), register_map);
667 backend_info_t *info = be_get_info(new_node);
668 for (size_t o = 0; o < out_arity; ++o) {
669 info->out_infos[o].req = out_reg_reqs[o];
671 arch_set_irn_register_reqs_in(new_node, in_reg_reqs);
673 SET_IA32_ORIG_NODE(new_node, node);
/**
 * Transform a CopyB (block copy) node. Large copies (>= 128 bytes) use the
 * REP MOVSx form with the element count in a register (ia32_CopyB); smaller
 * copies use the immediate-size form (ia32_CopyB_i).
 */
678 ir_node *ia32_gen_CopyB(ir_node *node)
680 ir_node *block = get_new_node(get_nodes_block(node));
681 ir_node *src = get_CopyB_src(node);
682 ir_node *new_src = get_new_node(src);
683 ir_node *dst = get_CopyB_dst(node);
684 ir_node *new_dst = get_new_node(dst);
685 ir_node *mem = get_CopyB_mem(node);
686 ir_node *new_mem = get_new_node(mem);
688 dbg_info *dbgi = get_irn_dbg_info(node);
689 int size = get_type_size_bytes(get_CopyB_type(node));
690 int throws_exception = ir_throws_exception(node);
693 /* If we have to copy more than 32 bytes, we use REP MOVSx and */
694 /* then we need the size explicitly in ECX. */
695 if (size >= 32 * 4) {
/* rem: the 1-3 trailing bytes REP MOVSD cannot cover */
696 rem = size & 0x3; /* size % 4 */
699 res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size);
701 res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
704 ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
707 res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
/* preserve exception semantics and debug origin of the firm node */
709 ir_set_throws_exception(res, throws_exception);
711 SET_IA32_ORIG_NODE(res, node);
/**
 * Transform a Proj referencing thread-local storage into an ia32 LdTls
 * node (load of the TLS base).
 */
716 ir_node *ia32_gen_Proj_tls(ir_node *node)
718 ir_node *block = get_new_node(get_nodes_block(node));
719 ir_node *res = new_bd_ia32_LdTls(NULL, block);
/**
 * Transform an Unknown node into a backend placeholder in the start block:
 * xUnknown for SSE2 floats, fldz (push 0.0) for x87 floats, ia32_Unknown
 * for gp-register modes. Any other mode is a hard error.
 */
723 ir_node *ia32_gen_Unknown(ir_node *node)
725 ir_mode *mode = get_irn_mode(node);
726 ir_graph *irg = current_ir_graph;
727 dbg_info *dbgi = get_irn_dbg_info(node);
728 ir_node *block = get_irg_start_block(irg);
731 if (mode_is_float(mode)) {
732 if (ia32_cg_config.use_sse2) {
733 res = new_bd_ia32_xUnknown(dbgi, block);
735 res = new_bd_ia32_fldz(dbgi, block);
737 } else if (ia32_mode_needs_gp_reg(mode)) {
738 res = new_bd_ia32_Unknown(dbgi, block);
740 panic("unsupported Unknown-Mode");
/**
 * Build an arch_register_req_t for a parsed constraint @p c.
 * Three visible paths:
 *  - same_as >= 0: allocate a should_be_same requirement and *switch* it
 *    onto the referenced output (firm puts matching constraints on outputs,
 *    gcc syntax puts them on inputs);
 *  - pure memory / unrestricted constraints: return a canned requirement;
 *  - limited register set: allocate a requirement with a trailing unsigned
 *    bitset holding c->allowed_registers.
 */
746 static arch_register_req_t const *ia32_make_register_req(ir_graph *const irg, constraint_t const *const c, int const n_outs, arch_register_req_t const **const out_reqs, int const pos)
748 int const same_as = c->same_as;
750 if (same_as >= n_outs)
751 panic("invalid output number in same_as constraint");
753 struct obstack *const obst = get_irg_obstack(irg);
754 arch_register_req_t *const req = OALLOC(obst, arch_register_req_t);
755 arch_register_req_t const *const other = out_reqs[same_as];
757 req->type |= arch_register_req_type_should_be_same;
758 req->other_same = 1U << pos;
760 /* Switch constraints. This is because in firm we have same_as
761 * constraints on the output constraints while in the gcc asm syntax
762 * they are specified on the input constraints. */
763 out_reqs[same_as] = req;
767 /* Pure memory ops. */
769 return arch_no_register_req;
/* no restriction -> the class-wide requirement suffices */
771 if (c->allowed_registers == 0 || c->all_registers_allowed)
772 return c->cls->class_req;
/* limited set: the bitset lives directly behind the req struct */
774 struct obstack *const obst = get_irg_obstack(irg);
775 arch_register_req_t *const req = (arch_register_req_t*)obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
776 unsigned *const limited = (unsigned*)(req + 1);
777 *limited = c->allowed_registers;
779 memset(req, 0, sizeof(req[0]));
780 req->type = arch_register_req_type_limited;
782 req->limited = limited;
/**
 * Translate an asm clobber string into a register requirement.
 * "memory" and "cc" need no register (handling elided here); otherwise the
 * named register is looked up and its single-register requirement returned.
 * Unknown names are a hard error.
 */
787 const arch_register_req_t *ia32_parse_clobber(const char *clobber)
789 if (strcmp(clobber, "memory") == 0 || strcmp(clobber, "cc") == 0)
792 arch_register_t const *const reg = ia32_get_clobber_register(clobber);
794 panic("Register '%s' mentioned in asm clobber is unknown", clobber);
796 return reg->single_req;
/**
 * Check whether @p other prevents folding @p am_candidate into an
 * address-mode operand: @p other blocks folding iff it is in the same block
 * and @p am_candidate is reachable from it (heights_reachable_in_block).
 * A Sync is checked predecessor-by-predecessor; Projs of the candidate
 * itself never block it ("do not block ourselves from getting eaten").
 */
800 int ia32_prevents_AM(ir_node *const block, ir_node *const am_candidate,
801 ir_node *const other)
803 if (get_nodes_block(other) != block)
806 if (is_Sync(other)) {
809 for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) {
810 ir_node *const pred = get_Sync_pred(other, i);
812 if (get_nodes_block(pred) != block)
815 /* Do not block ourselves from getting eaten */
816 if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate)
819 if (!heights_reachable_in_block(ia32_heights, pred, am_candidate))
827 /* Do not block ourselves from getting eaten */
828 if (is_Proj(other) && get_Proj_pred(other) == am_candidate)
831 if (!heights_reachable_in_block(ia32_heights, other, am_candidate))
/**
 * Try to express @p node as an ia32 Immediate for the given constraint
 * letter. Handles Const, SymConst (address of a non-TLS entity) and
 * Add(Const, SymConst) in either operand order. Returns the Immediate on
 * success; the failure paths (returning NULL) are in elided lines.
 * NOTE(review): the end of this function lies beyond the visible excerpt.
 */
838 ir_node *ia32_try_create_Immediate(ir_node *node, char immediate_constraint_type)
840 ir_mode *const mode = get_irn_mode(node);
/* only integer/reference values can become immediates */
841 if (!mode_is_int(mode) && !mode_is_reference(mode))
846 if (is_Const(node)) {
/* TLS entities need the TLS base register, so they cannot be folded */
849 } else if (is_SymConst_addr_ent(node)
850 && get_entity_owner(get_SymConst_entity(node)) != get_tls_type()) {
853 } else if (is_Add(node)) {
854 ir_node *left = get_Add_left(node);
855 ir_node *right = get_Add_right(node);
856 if (is_Const(left) && is_SymConst_addr_ent(right)) {
859 } else if (is_SymConst_addr_ent(left) && is_Const(right)) {
/* the constant part must fit the constraint's value range */
871 ir_tarval *offset = get_Const_tarval(cnst);
872 if (!tarval_is_long(offset)) {
873 ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst);
877 val = get_tarval_long(offset);
878 if (!check_immediate_constraint(val, immediate_constraint_type))
882 ir_entity *symconst_ent = NULL;
883 if (symconst != NULL) {
884 /* we need full 32bits for symconsts */
885 if (immediate_constraint_type != 'i')
888 symconst_ent = get_SymConst_entity(symconst);
891 ir_graph *const irg = get_irn_irg(node);
892 return ia32_create_Immediate(irg, symconst_ent, 0, val);