// trans.rs: Translate the completed AST to the LLVM IR.
//
// Some functions here, such as trans_block and trans_expr, return a value --
// the result of the translation to LLVM -- while others, such as trans_fn,
// trans_obj, and trans_item, are called only for the side effect of adding a
// particular definition to the LLVM IR output we're producing.
//
// Hopefully useful general knowledge about trans:
//
// * There's no way to find out the ty::t type of a ValueRef. Doing so
//   would be "trying to get the eggs out of an omelette" (credit:
//   pcwalton). You can, instead, find out its TypeRef by calling val_ty,
//   but many TypeRefs correspond to one ty::t; for instance, tup(int, int,
//   int) and rec(x=int, y=int, z=int) will have the same TypeRef.

import std::{map, time};
import std::map::hashmap;
import std::map::{new_int_hash, new_str_hash};
import option::{some, none};
import driver::session;
import front::attr;
import middle::{ty, gc, resolve, debuginfo};
import middle::freevars::*;
import back::{link, abi, upcall};
import syntax::{ast, ast_util, codemap};
import syntax::visit;
import syntax::codemap::span;
import syntax::print::pprust::{expr_to_str, stmt_to_str};
import visit::vt;
import util::common::*;
import lib::llvm::{llvm, mk_target_data, mk_type_names};
import lib::llvm::llvm::{ModuleRef, ValueRef, TypeRef, BasicBlockRef};
import lib::llvm::{True, False};
import link::{mangle_internal_name_by_type_only,
              mangle_internal_name_by_seq,
              mangle_internal_name_by_path,
              mangle_internal_name_by_path_and_seq,
              mangle_exported_name};
import metadata::{csearch, cstore};
import util::ppaux::{ty_to_str, ty_to_short_str};
import trans_common::*;
import trans_build::*;
import trans_objects::{trans_anon_obj, trans_obj};
import tvec = trans_vec;
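
// Convenience wrapper around type_of: establishes the static-size
// precondition with a dynamic check instead of requiring it of the caller.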
fn type_of_1(bcx: @block_ctxt, t: ty::t) -> TypeRef {
    let cx = bcx_ccx(bcx);
    check type_has_static_size(cx, t);
    type_of(cx, bcx.sp, t)
}

fn type_of(cx: @crate_ctxt, sp: span, t: ty::t) : type_has_static_size(cx, t)
    -> TypeRef {
    // Should follow from type_has_static_size -- argh.
    // FIXME (requires Issue #586)
    check non_ty_var(cx, t);
    type_of_inner(cx, sp, t)
}

fn type_of_explicit_args(cx: @crate_ctxt, sp: span, inputs: [ty::arg]) ->
    [TypeRef] {
    let atys = [];
    for arg in inputs {
        let arg_ty = arg.ty;
        // FIXME: would be nice to have a constraint on arg
        // that would obviate the need for this check
        check non_ty_var(cx, arg_ty);
        let llty = type_of_inner(cx, sp, arg_ty);
        atys += [arg.mode == ast::by_val ? llty : T_ptr(llty)];
    }
    ret atys;
}

// NB: must keep 4 fns in sync:
//
// - type_of_fn
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
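//
// Illustrative example: a non-method fn(int) -> int with one unbounded ty
// param lowers to roughly void f(int* out, cbox* env, tydesc* tp0, <arg>),
// where <arg> is the int itself for a by_val argument and a pointer
// otherwise.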
fn type_of_fn(cx: @crate_ctxt, sp: span, is_method: bool, inputs: [ty::arg],
              output: ty::t, params: [ty::param_bounds]) -> TypeRef {
    let atys: [TypeRef] = [];

    // Arg 0: Output pointer.
    check non_ty_var(cx, output);
    let out_ty = T_ptr(type_of_inner(cx, sp, output));
    atys += [out_ty];

    // Arg 1: Env (closure-bindings / self-obj)
    if is_method {
        atys += [T_ptr(cx.rust_object_type)];
    } else {
        atys += [T_opaque_cbox_ptr(cx)];
    }

    // Args >2: ty params, if not acquired via capture...
    if !is_method {
        for bounds in params {
            atys += [T_ptr(cx.tydesc_type)];
            for bound in *bounds {
                alt bound {
                  ty::bound_iface(_) { atys += [T_ptr(T_dict())]; }
                  _ {}
                }
            }
        }
    }

    // ... then explicit args.
    atys += type_of_explicit_args(cx, sp, inputs);
    ret T_fn(atys, llvm::LLVMVoidType());
}

// Given a function type and a count of ty params, construct an llvm type
fn type_of_fn_from_ty(cx: @crate_ctxt, sp: span, fty: ty::t,
                      param_bounds: [ty::param_bounds]) -> TypeRef {
    // FIXME: Check should be unnecessary, b/c it's implied
    // by returns_non_ty_var(t). Make that a postcondition
    // (see Issue #586)
    let ret_ty = ty::ty_fn_ret(cx.tcx, fty);
    ret type_of_fn(cx, sp, false, ty::ty_fn_args(cx.tcx, fty),
                   ret_ty, param_bounds);
}

fn type_of_inner(cx: @crate_ctxt, sp: span, t: ty::t)
    : non_ty_var(cx, t) -> TypeRef {
    // Check the cache.
    if cx.lltypes.contains_key(t) { ret cx.lltypes.get(t); }
    let llty = alt ty::struct(cx.tcx, t) {
      ty::ty_native(_) { T_ptr(T_i8()) }
      ty::ty_nil. { T_nil() }
      ty::ty_bot. {
        T_nil() /* ...I guess? */
      }
      ty::ty_bool. { T_bool() }
      ty::ty_int(t) { T_int_ty(cx, t) }
      ty::ty_uint(t) { T_uint_ty(cx, t) }
      ty::ty_float(t) { T_float_ty(cx, t) }
      ty::ty_str. { T_ptr(T_vec(cx, T_i8())) }
      ty::ty_tag(did, _) { type_of_tag(cx, sp, did, t) }
      ty::ty_box(mt) {
        let mt_ty = mt.ty;
        check non_ty_var(cx, mt_ty);
        T_ptr(T_box(cx, type_of_inner(cx, sp, mt_ty)))
      }
      ty::ty_uniq(mt) {
        let mt_ty = mt.ty;
        check non_ty_var(cx, mt_ty);
        T_ptr(type_of_inner(cx, sp, mt_ty))
      }
      ty::ty_vec(mt) {
        let mt_ty = mt.ty;
        if ty::type_has_dynamic_size(cx.tcx, mt_ty) {
            T_ptr(cx.opaque_vec_type)
        } else {
            // should be unnecessary
            check non_ty_var(cx, mt_ty);
            T_ptr(T_vec(cx, type_of_inner(cx, sp, mt_ty)))
        }
      }
      ty::ty_ptr(mt) {
        let mt_ty = mt.ty;
        check non_ty_var(cx, mt_ty);
        T_ptr(type_of_inner(cx, sp, mt_ty))
      }
      ty::ty_rec(fields) {
        let tys: [TypeRef] = [];
        for f: ty::field in fields {
            let mt_ty = f.mt.ty;
            check non_ty_var(cx, mt_ty);
            tys += [type_of_inner(cx, sp, mt_ty)];
        }
        T_struct(tys)
      }
      ty::ty_fn(_) {
        T_fn_pair(cx, type_of_fn_from_ty(cx, sp, t, []))
      }
      ty::ty_native_fn(args, out) {
        let nft = native_fn_wrapper_type(cx, sp, [], t);
        T_fn_pair(cx, nft)
      }
      ty::ty_obj(_) { cx.rust_object_type }
      ty::ty_iface(_, _) { T_opaque_iface_ptr(cx) }
      ty::ty_res(_, sub, tps) {
        let sub1 = ty::substitute_type_params(cx.tcx, tps, sub);
        check non_ty_var(cx, sub1);
        // FIXME #1184: Resource flag is larger than necessary
        ret T_struct([cx.int_type, type_of_inner(cx, sp, sub1)]);
      }
      ty::ty_var(_) {
        // Should be unreachable b/c of precondition.
        // FIXME: would be nice to have a way of expressing this
        // through postconditions, and then making it sound to omit
        // cases in the alt
        std::util::unreachable()
      }
      ty::ty_param(_, _) { T_typaram(cx.tn) }
      ty::ty_send_type. | ty::ty_type. { T_ptr(cx.tydesc_type) }
      ty::ty_tup(elts) {
        let tys = [];
        for elt in elts {
            check non_ty_var(cx, elt);
            tys += [type_of_inner(cx, sp, elt)];
        }
        T_struct(tys)
      }
      ty::ty_opaque_closure_ptr(_) {
        T_opaque_cbox_ptr(cx)
      }
      ty::ty_constr(subt, _) {
        // FIXME: could be a constraint on ty_fn
        check non_ty_var(cx, subt);
        type_of_inner(cx, sp, subt)
      }
      _ {
        fail "type_of_inner not implemented for this kind of type";
      }
    };
    cx.lltypes.insert(t, llty);
    ret llty;
}

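// Computes the LLVM type of a tag value. Single-variant ("degenerate") tags
// need no discriminant: they lower to their raw payload bytes (or just a
// variant word when empty). Multi-variant tags lower to T_tag when the size
// is statically known and to T_opaque_tag otherwise.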
fn type_of_tag(cx: @crate_ctxt, sp: span, did: ast::def_id, t: ty::t)
    -> TypeRef {
    let degen = vec::len(*ty::tag_variants(cx.tcx, did)) == 1u;
    if check type_has_static_size(cx, t) {
        let size = static_size_of_tag(cx, sp, t);
        if !degen { T_tag(cx, size) }
        else if size == 0u { T_struct([T_tag_variant(cx)]) }
        else { T_array(T_i8(), size) }
    } else {
        if degen { T_struct([T_tag_variant(cx)]) }
        else { T_opaque_tag(cx) }
    }
}

fn type_of_ty_param_bounds_and_ty(lcx: @local_ctxt, sp: span,
                                  tpt: ty::ty_param_bounds_and_ty)
    -> TypeRef {
    let cx = lcx.ccx;
    let t = tpt.ty;
    alt ty::struct(cx.tcx, t) {
      ty::ty_fn(_) | ty::ty_native_fn(_, _) {
        ret type_of_fn_from_ty(cx, sp, t, *tpt.bounds);
      }
      _ {
        // fall through
      }
    }
    // FIXME: could have a precondition on tpt, but that
    // doesn't work right now because one predicate can't imply
    // another
    check (type_has_static_size(cx, t));
    type_of(cx, sp, t)
}

fn type_of_or_i8(bcx: @block_ctxt, typ: ty::t) -> TypeRef {
    let ccx = bcx_ccx(bcx);
    if check type_has_static_size(ccx, typ) {
        let sp = bcx.sp;
        type_of(ccx, sp, typ)
    } else { T_i8() }
}

// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!
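// For example, sanitize("@int") yields "boxed_int" and sanitize("(int,int)")
// yields "_of_int_int".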
fn sanitize(s: str) -> str {
    let result = "";
    for c: u8 in s {
        if c == '@' as u8 {
            result += "boxed_";
        } else {
            if c == ',' as u8 {
                result += "_";
            } else {
                if c == '{' as u8 || c == '(' as u8 {
                    result += "_of_";
                } else {
                    if c != 10u8 && c != '}' as u8 && c != ')' as u8 &&
                           c != ' ' as u8 && c != '\t' as u8 &&
                           c != ';' as u8 {
                        let v = [c];
                        result += str::unsafe_from_bytes(v);
                    }
                }
            }
        }
    }
    ret result;
}

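// Records the wall-clock time spent translating a single fn, in
// milliseconds, into ccx.stats.fn_times.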
fn log_fn_time(ccx: @crate_ctxt, name: str, start: time::timeval,
               end: time::timeval) {
    let elapsed =
        1000 * (end.sec - start.sec as int) +
            ((end.usec as int) - (start.usec as int)) / 1000;
    *ccx.stats.fn_times += [{ident: name, time: elapsed}];
}

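// Declares (or fetches, if already declared) a function with the given name,
// type, and calling convention in llmod.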
fn decl_fn(llmod: ModuleRef, name: str, cc: uint, llty: TypeRef) -> ValueRef {
    let llfn: ValueRef =
        str::as_buf(name, {|buf|
            llvm::LLVMGetOrInsertFunction(llmod, buf, llty) });
    llvm::LLVMSetFunctionCallConv(llfn, cc);
    ret llfn;
}

fn decl_cdecl_fn(llmod: ModuleRef, name: str, llty: TypeRef) -> ValueRef {
    ret decl_fn(llmod, name, lib::llvm::LLVMCCallConv, llty);
}

// Only use this if you are going to actually define the function. It's
// not valid to simply declare a function as internal.
fn decl_internal_cdecl_fn(llmod: ModuleRef, name: str, llty: TypeRef) ->
    ValueRef {
    let llfn = decl_cdecl_fn(llmod, name, llty);
    llvm::LLVMSetLinkage(llfn,
                         lib::llvm::LLVMInternalLinkage as llvm::Linkage);
    ret llfn;
}

fn get_extern_fn(externs: hashmap<str, ValueRef>, llmod: ModuleRef, name: str,
                 cc: uint, ty: TypeRef) -> ValueRef {
    if externs.contains_key(name) { ret externs.get(name); }
    let f = decl_fn(llmod, name, cc, ty);
    externs.insert(name, f);
    ret f;
}

fn get_extern_const(externs: hashmap<str, ValueRef>, llmod: ModuleRef,
                    name: str, ty: TypeRef) -> ValueRef {
    if externs.contains_key(name) { ret externs.get(name); }
    let c = str::as_buf(name, {|buf| llvm::LLVMAddGlobal(llmod, ty, buf) });
    externs.insert(name, c);
    ret c;
}

fn get_simple_extern_fn(cx: @block_ctxt,
                        externs: hashmap<str, ValueRef>,
                        llmod: ModuleRef,
                        name: str, n_args: int) -> ValueRef {
    let ccx = cx.fcx.lcx.ccx;
    let inputs = vec::init_elt::<TypeRef>(ccx.int_type, n_args as uint);
    let output = ccx.int_type;
    let t = T_fn(inputs, output);
    ret get_extern_fn(externs, llmod, name, lib::llvm::LLVMCCallConv, t);
}

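// Emits a call to a native (C) function taking n int-sized args: each
// argument is zero-extended or bitcast to the target's int type first.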
fn trans_native_call(cx: @block_ctxt, externs: hashmap<str, ValueRef>,
                     llmod: ModuleRef, name: str, args: [ValueRef]) ->
    ValueRef {
    let n: int = vec::len::<ValueRef>(args) as int;
    let llnative: ValueRef =
        get_simple_extern_fn(cx, externs, llmod, name, n);
    let call_args: [ValueRef] = [];
    for a: ValueRef in args {
        call_args += [ZExtOrBitCast(cx, a, bcx_ccx(cx).int_type)];
    }
    ret Call(cx, llnative, call_args);
}

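// Frees a box via the `free` upcall, unless GC is enabled -- in that case
// the collector is presumably responsible for reclaiming it.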
fn trans_free_if_not_gc(cx: @block_ctxt, v: ValueRef) -> @block_ctxt {
    let ccx = bcx_ccx(cx);
    if !ccx.sess.get_opts().do_gc {
        Call(cx, ccx.upcalls.free,
             [PointerCast(cx, v, T_ptr(T_i8())),
              C_int(bcx_ccx(cx), 0)]);
    }
    ret cx;
}

fn trans_shared_free(cx: @block_ctxt, v: ValueRef) -> @block_ctxt {
    Call(cx, bcx_ccx(cx).upcalls.shared_free,
         [PointerCast(cx, v, T_ptr(T_i8()))]);
    ret cx;
}

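// Unsigned max/min of two runtime values, as an icmp ult plus a select.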
fn umax(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
    let cond = ICmp(cx, lib::llvm::LLVMIntULT, a, b);
    ret Select(cx, cond, b, a);
}

fn umin(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
    let cond = ICmp(cx, lib::llvm::LLVMIntULT, a, b);
    ret Select(cx, cond, a, b);
}

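// Rounds off up to the next multiple of align, which must be a power of
// two. E.g. off=5, align=4: mask=3, bumped=8, and 8 & !3 == 8.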
fn align_to(cx: @block_ctxt, off: ValueRef, align: ValueRef) -> ValueRef {
    let mask = Sub(cx, align, C_int(bcx_ccx(cx), 1));
    let bumped = Add(cx, off, mask);
    ret And(cx, bumped, Not(cx, mask));
}

// Returns the real size of the given type for the current target.
fn llsize_of_real(cx: @crate_ctxt, t: TypeRef) -> uint {
    ret llvm::LLVMStoreSizeOfType(cx.td.lltd, t);
}

// Returns the real alignment of the given type for the current target.
fn llalign_of_real(cx: @crate_ctxt, t: TypeRef) -> uint {
    ret llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, t);
}

fn llsize_of(cx: @crate_ctxt, t: TypeRef) -> ValueRef {
    ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMSizeOf(t), cx.int_type,
                               False);
}

fn llalign_of(cx: @crate_ctxt, t: TypeRef) -> ValueRef {
    ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMAlignOf(t), cx.int_type,
                               False);
}

fn size_of(cx: @block_ctxt, t: ty::t) -> result {
    size_of_(cx, t, align_total)
}

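// How to pad the tail of a computed size: align_total pads the whole
// structure to the max alignment of its members; align_next(t) pads only
// enough that a following value of type t would be aligned.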
tag align_mode {
    align_total;
    align_next(ty::t);
}

fn size_of_(cx: @block_ctxt, t: ty::t, mode: align_mode) -> result {
    let ccx = bcx_ccx(cx);
    if check type_has_static_size(ccx, t) {
        let sp = cx.sp;
        rslt(cx, llsize_of(bcx_ccx(cx), type_of(ccx, sp, t)))
    } else { dynamic_size_of(cx, t, mode) }
}

fn align_of(cx: @block_ctxt, t: ty::t) -> result {
    let ccx = bcx_ccx(cx);
    if check type_has_static_size(ccx, t) {
        let sp = cx.sp;
        rslt(cx, llalign_of(bcx_ccx(cx), type_of(ccx, sp, t)))
    } else { dynamic_align_of(cx, t) }
}

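// Allocates stack space in the function's dedicated llstaticallocas block
// (rather than the current block), so the slot exists for the whole frame.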
fn alloca(cx: @block_ctxt, t: TypeRef) -> ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(t); }
    ret Alloca(new_raw_block_ctxt(cx.fcx, cx.fcx.llstaticallocas), t);
}

fn dynastack_alloca(cx: @block_ctxt, t: TypeRef, n: ValueRef, ty: ty::t) ->
    ValueRef {
    if cx.unreachable { ret llvm::LLVMGetUndef(t); }
    let bcx = cx;
    let dy_cx = new_raw_block_ctxt(cx.fcx, cx.fcx.lldynamicallocas);
    alt bcx_fcx(cx).llobstacktoken {
      none. {
        bcx_fcx(cx).llobstacktoken =
            some(mk_obstack_token(bcx_ccx(cx), cx.fcx));
      }
      some(_) {/* no-op */ }
    }
    let dynastack_alloc = bcx_ccx(bcx).upcalls.dynastack_alloc;
    let llsz = Mul(dy_cx,
                   C_uint(bcx_ccx(bcx), llsize_of_real(bcx_ccx(bcx), t)),
                   n);
    let ti = none;
    let lltydesc = get_tydesc(cx, ty, false, tps_normal, ti).result.val;
    let llresult = Call(dy_cx, dynastack_alloc, [llsz, lltydesc]);
    ret PointerCast(dy_cx, llresult, T_ptr(t));
}

fn mk_obstack_token(ccx: @crate_ctxt, fcx: @fn_ctxt) -> ValueRef {
    let cx = new_raw_block_ctxt(fcx, fcx.lldynamicallocas);
    ret Call(cx, ccx.upcalls.dynastack_mark, []);
}

// Creates a simpler, size-equivalent type. The resulting type is guaranteed
// to have (a) the same size as the type that was passed in; (b) to be non-
// recursive. This is done by replacing all boxes in a type with boxed unit
// types.
fn simplify_type(ccx: @crate_ctxt, typ: ty::t) -> ty::t {
    fn simplifier(ccx: @crate_ctxt, typ: ty::t) -> ty::t {
        alt ty::struct(ccx.tcx, typ) {
          ty::ty_box(_) | ty::ty_iface(_, _) {
            ret ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx));
          }
          ty::ty_uniq(_) {
            ret ty::mk_imm_uniq(ccx.tcx, ty::mk_nil(ccx.tcx));
          }
          ty::ty_fn(_) {
            ret ty::mk_tup(ccx.tcx,
                           [ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx)),
                            ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx))]);
          }
          ty::ty_obj(_) {
            ret ty::mk_tup(ccx.tcx,
                           [ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx)),
                            ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx))]);
          }
          ty::ty_res(_, sub, tps) {
            let sub1 = ty::substitute_type_params(ccx.tcx, tps, sub);
            ret ty::mk_tup(ccx.tcx,
                           [ty::mk_int(ccx.tcx), simplify_type(ccx, sub1)]);
          }
          _ { ret typ; }
        }
    }
    ret ty::fold_ty(ccx.tcx, ty::fm_general(bind simplifier(ccx, _)), typ);
}

// Computes the size of the data part of a non-dynamically-sized tag.
fn static_size_of_tag(cx: @crate_ctxt, sp: span, t: ty::t)
    : type_has_static_size(cx, t) -> uint {
    if cx.tag_sizes.contains_key(t) { ret cx.tag_sizes.get(t); }
    alt ty::struct(cx.tcx, t) {
      ty::ty_tag(tid, subtys) {
        // Compute max(variant sizes).
        let max_size = 0u;
        let variants = ty::tag_variants(cx.tcx, tid);
        for variant: ty::variant_info in *variants {
            let tup_ty = simplify_type(cx, ty::mk_tup(cx.tcx, variant.args));
            // Perform any type parameter substitutions.
            tup_ty = ty::substitute_type_params(cx.tcx, subtys, tup_ty);
            // Here we possibly do a recursive call.
            // FIXME: Avoid this check. Since the parent has static
            // size, any field must as well. There should be a way to
            // express that with constrained types.
            check (type_has_static_size(cx, tup_ty));
            let this_size = llsize_of_real(cx, type_of(cx, sp, tup_ty));
            if max_size < this_size { max_size = this_size; }
        }
        cx.tag_sizes.insert(t, max_size);
        ret max_size;
      }
      _ {
        cx.tcx.sess.span_fatal(sp, "non-tag passed to static_size_of_tag()");
      }
    }
}

fn dynamic_size_of(cx: @block_ctxt, t: ty::t, mode: align_mode) -> result {
    fn align_elements(cx: @block_ctxt, elts: [ty::t],
                      mode: align_mode) -> result {
        //
        // C padding rules:
        //
        //   - Pad after each element so that next element is aligned.
        //   - Pad after final structure member so that whole structure
        //     is aligned to max alignment of interior.
        //
        let off = C_int(bcx_ccx(cx), 0);
        let max_align = C_int(bcx_ccx(cx), 1);
        let bcx = cx;
        for e: ty::t in elts {
            let elt_align = align_of(bcx, e);
            bcx = elt_align.bcx;
            let elt_size = size_of(bcx, e);
            bcx = elt_size.bcx;
            let aligned_off = align_to(bcx, off, elt_align.val);
            off = Add(bcx, aligned_off, elt_size.val);
            max_align = umax(bcx, max_align, elt_align.val);
        }
        off = alt mode {
          align_total. {
            align_to(bcx, off, max_align)
          }
          align_next(t) {
            let {bcx, val: align} = align_of(bcx, t);
            align_to(bcx, off, align)
          }
        };
        ret rslt(bcx, off);
    }
    alt ty::struct(bcx_tcx(cx), t) {
      ty::ty_param(p, _) {
        let szptr = field_of_tydesc(cx, t, false, abi::tydesc_field_size);
        ret rslt(szptr.bcx, Load(szptr.bcx, szptr.val));
      }
      ty::ty_rec(flds) {
        let tys: [ty::t] = [];
        for f: ty::field in flds { tys += [f.mt.ty]; }
        ret align_elements(cx, tys, mode);
      }
      ty::ty_tup(elts) {
        let tys = [];
        for tp in elts { tys += [tp]; }
        ret align_elements(cx, tys, mode);
      }
      ty::ty_tag(tid, tps) {
        let bcx = cx;
        let ccx = bcx_ccx(bcx);
        // Compute max(variant sizes).
        let max_size: ValueRef = alloca(bcx, ccx.int_type);
        Store(bcx, C_int(ccx, 0), max_size);
        let variants = ty::tag_variants(bcx_tcx(bcx), tid);
        for variant: ty::variant_info in *variants {
            // Perform type substitution on the raw argument types.
            let raw_tys: [ty::t] = variant.args;
            let tys: [ty::t] = [];
            for raw_ty: ty::t in raw_tys {
                let t = ty::substitute_type_params(bcx_tcx(cx), tps, raw_ty);
                tys += [t];
            }
            let rslt = align_elements(bcx, tys, mode);
            bcx = rslt.bcx;
            let this_size = rslt.val;
            let old_max_size = Load(bcx, max_size);
            Store(bcx, umax(bcx, this_size, old_max_size), max_size);
        }
        let max_size_val = Load(bcx, max_size);
        let total_size =
            if vec::len(*variants) != 1u {
                Add(bcx, max_size_val, llsize_of(ccx, ccx.int_type))
            } else { max_size_val };
        ret rslt(bcx, total_size);
      }
    }
}

fn dynamic_align_of(cx: @block_ctxt, t: ty::t) -> result {
    // FIXME: Typestate constraint that shows this alt is
    // exhaustive
    alt ty::struct(bcx_tcx(cx), t) {
      ty::ty_param(p, _) {
        let aptr = field_of_tydesc(cx, t, false, abi::tydesc_field_align);
        ret rslt(aptr.bcx, Load(aptr.bcx, aptr.val));
      }
      ty::ty_rec(flds) {
        let a = C_int(bcx_ccx(cx), 1);
        let bcx = cx;
        for f: ty::field in flds {
            let align = align_of(bcx, f.mt.ty);
            bcx = align.bcx;
            a = umax(bcx, a, align.val);
        }
        ret rslt(bcx, a);
      }
      ty::ty_tag(_, _) {
        ret rslt(cx, C_int(bcx_ccx(cx), 1)); // FIXME: stub
      }
      ty::ty_tup(elts) {
        let a = C_int(bcx_ccx(cx), 1);
        let bcx = cx;
        for e in elts {
            let align = align_of(bcx, e);
            bcx = align.bcx;
            a = umax(bcx, a, align.val);
        }
        ret rslt(bcx, a);
      }
    }
}

// Given a pointer p, returns a pointer sz(p) (i.e., inc'd by sz bytes).
// The type of the returned pointer is always i8*. If you care about the
// return type, use bump_ptr().
fn ptr_offs(bcx: @block_ctxt, base: ValueRef, sz: ValueRef) -> ValueRef {
    let raw = PointerCast(bcx, base, T_ptr(T_i8()));
    GEP(bcx, raw, [sz])
}

// Increment a pointer by a given amount and then cast it to be a pointer
// to a given type.
fn bump_ptr(bcx: @block_ctxt, t: ty::t, base: ValueRef, sz: ValueRef) ->
    ValueRef {
    let ccx = bcx_ccx(bcx);
    let bumped = ptr_offs(bcx, base, sz);
    if check type_has_static_size(ccx, t) {
        let sp = bcx.sp;
        let typ = T_ptr(type_of(ccx, sp, t));
        PointerCast(bcx, bumped, typ)
    } else { bumped }
}

// GEP_tup_like is a pain to use if you always have to precede it with a
// check.
fn GEP_tup_like_1(cx: @block_ctxt, t: ty::t, base: ValueRef, ixs: [int])
    -> result {
    check type_is_tup_like(cx, t);
    ret GEP_tup_like(cx, t, base, ixs);
}

// Replacement for the LLVM 'GEP' instruction when field-indexing into a
// tuple-like structure (tup, rec) with a static index. This one is driven off
// ty::struct and knows what to do when it runs into a ty_param stuck in the
// middle of the thing it's GEP'ing into. Much like size_of and align_of,
// above.
fn GEP_tup_like(bcx: @block_ctxt, t: ty::t, base: ValueRef, ixs: [int])
    : type_is_tup_like(bcx, t) -> result {
    // It might be a static-known type. Handle this.
    if !ty::type_has_dynamic_size(bcx_tcx(bcx), t) {
        ret rslt(bcx, GEPi(bcx, base, ixs));
    }
    // It is a dynamic-containing type that, if we convert directly to an
    // LLVM TypeRef, will be all wrong; there's no proper LLVM type to
    // represent it, and the lowering function will stick in i8* values for
    // each ty_param, which is not right; the ty_params are all of some
    // dynamic size.
    //
    // What we must do instead is sadder. We must look through the indices
    // manually and split the input type into a prefix and a target. We then
    // measure the prefix size, bump the input pointer by that amount, and
    // cast to a pointer-to-target type.

    // Given a type, an index vector and an element number N in that vector,
    // calculate index X and the type that results by taking the first X-1
    // elements of the type and splitting the Xth off. Return the prefix as
    // well as the innermost Xth type.
    fn split_type(ccx: @crate_ctxt, t: ty::t, ixs: [int], n: uint) ->
        {prefix: [ty::t], target: ty::t} {
        let len: uint = vec::len::<int>(ixs);
        // We don't support 0-index or 1-index GEPs: The former is nonsense
        // and the latter would only be meaningful if we supported non-0
        // values for the 0th index (we don't).
        assert (len > 1u);
        if n == 0u {
            // Since we're starting from a value that's a pointer to a
            // *single* structure, the first index (in GEP-ese) should just
            // be 0, to yield the pointee.
            assert (ixs[n] == 0);
            ret split_type(ccx, t, ixs, n + 1u);
        }
        assert (n < len);
        let ix: int = ixs[n];
        let prefix: [ty::t] = [];
        let i: int = 0;
        while i < ix {
            prefix += [ty::get_element_type(ccx.tcx, t, i as uint)];
            i += 1;
        }
        let selected = ty::get_element_type(ccx.tcx, t, i as uint);
        if n == len - 1u {
            // We are at the innermost index.
            ret {prefix: prefix, target: selected};
        } else {
            // Not the innermost index; call self recursively to dig deeper.
            // Once we get an inner result, append it to the current prefix
            // and return to caller.
            let inner = split_type(ccx, selected, ixs, n + 1u);
            prefix += inner.prefix;
            ret {prefix: prefix with inner};
        }
    }
    // We make a fake prefix tuple-type here; luckily for measuring sizes
    // the tuple parens are associative so it doesn't matter that we've
    // flattened the incoming structure.
    let s = split_type(bcx_ccx(bcx), t, ixs, 0u);
    let args = [];
    for typ: ty::t in s.prefix { args += [typ]; }
    let prefix_ty = ty::mk_tup(bcx_tcx(bcx), args);
    let sz = size_of_(bcx, prefix_ty, align_next(s.target));
    ret rslt(sz.bcx, bump_ptr(sz.bcx, s.target, base, sz.val));
}

// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
// This function uses GEP_tup_like() above and automatically performs casts as
// appropriate. @llblobptr is the data part of a tag value; its actual type is
// meaningless, as it will be cast away.
fn GEP_tag(cx: @block_ctxt, llblobptr: ValueRef, tag_id: ast::def_id,
           variant_id: ast::def_id, ty_substs: [ty::t],
           ix: uint) : valid_variant_index(ix, cx, tag_id, variant_id) ->
    result {
    let variant = ty::tag_variant_with_id(bcx_tcx(cx), tag_id, variant_id);
    // Synthesize a tuple type so that GEP_tup_like() can work its magic.
    // Separately, store the type of the element we're interested in.
    let arg_tys = variant.args;
    let true_arg_tys: [ty::t] = [];
    for aty: ty::t in arg_tys {
        let arg_ty = ty::substitute_type_params(bcx_tcx(cx), ty_substs, aty);
        true_arg_tys += [arg_ty];
    }
    // We know that ix < len(variant.args) -- so
    // it's safe to do this. (Would be nice to have
    // typestate guarantee that a dynamic bounds check
    // error can't happen here, but that's in the future.)
    let elem_ty = true_arg_tys[ix];
    let tup_ty = ty::mk_tup(bcx_tcx(cx), true_arg_tys);
    // Cast the blob pointer to the appropriate type, if we need to (i.e. if
    // the blob pointer isn't dynamically sized).
    let llunionptr: ValueRef;
    let sp = cx.sp;
    let ccx = bcx_ccx(cx);
    if check type_has_static_size(ccx, tup_ty) {
        let llty = type_of(ccx, sp, tup_ty);
        llunionptr = TruncOrBitCast(cx, llblobptr, T_ptr(llty));
    } else { llunionptr = llblobptr; }
    // Do the GEP_tup_like().
    // Silly check -- postcondition on mk_tup?
    check type_is_tup_like(cx, tup_ty);
    let rs = GEP_tup_like(cx, tup_ty, llunionptr, [0, ix as int]);
    // Cast the result to the appropriate type, if necessary.
    let rs_ccx = bcx_ccx(rs.bcx);
    let val =
        if check type_has_static_size(rs_ccx, elem_ty) {
            let llelemty = type_of(rs_ccx, sp, elem_ty);
            PointerCast(rs.bcx, rs.val, T_ptr(llelemty))
        } else { rs.val };
    ret rslt(rs.bcx, val);
}

// trans_shared_malloc: expects a type indicating which pointer type we want
// and a size indicating how much space we want malloc'd.
fn trans_shared_malloc(cx: @block_ctxt, llptr_ty: TypeRef, llsize: ValueRef)
    -> result {
    // FIXME: need a table to collect tydesc globals.
    let tydesc = C_null(T_ptr(bcx_ccx(cx).tydesc_type));
    let rval =
        Call(cx, bcx_ccx(cx).upcalls.shared_malloc,
             [llsize, tydesc]);
    ret rslt(cx, PointerCast(cx, rval, llptr_ty));
}

// trans_malloc_boxed_raw: expects an unboxed type and returns a pointer to
// enough space for something of that type, along with space for a reference
// count; in other words, it allocates a box for something of that type.
fn trans_malloc_boxed_raw(cx: @block_ctxt, t: ty::t) -> result {
    let bcx = cx;

    // Synthesize a fake box type structurally so we have something
    // to measure the size of.
    //
    // We synthesize two types here because we want both the type of the
    // pointer and the pointee. boxed_body is the type that we measure the
    // size of; box_ptr is the type that's converted to a TypeRef and used as
    // the pointer cast target in trans_raw_malloc.
    //
    // The mk_int here is the space being reserved for the refcount.
    let boxed_body = ty::mk_tup(bcx_tcx(bcx), [ty::mk_int(bcx_tcx(cx)), t]);
    let box_ptr = ty::mk_imm_box(bcx_tcx(bcx), t);
    let r = size_of(cx, boxed_body);
    let llsz = r.val; bcx = r.bcx;

    // Grab the TypeRef type of box_ptr, because that's what trans_raw_malloc
    // wants.
    // FIXME: Could avoid this check with a postcondition on mk_imm_box?
    // (requires Issue #586)
    let ccx = bcx_ccx(bcx);
    let sp = bcx.sp;
    check (type_has_static_size(ccx, box_ptr));
    let llty = type_of(ccx, sp, box_ptr);

    let ti = none;
    let tydesc_result = get_tydesc(bcx, t, true, tps_normal, ti);
    let lltydesc = tydesc_result.result.val; bcx = tydesc_result.result.bcx;

    let rval = Call(cx, ccx.upcalls.malloc,
                    [llsz, lltydesc]);
    ret rslt(cx, PointerCast(cx, rval, llty));
}

// trans_malloc_boxed: usefully wraps trans_malloc_boxed_raw; allocates a box,
// initializes the reference count to 1, and pulls out the body and rc
fn trans_malloc_boxed(cx: @block_ctxt, t: ty::t) ->
    {bcx: @block_ctxt, box: ValueRef, body: ValueRef} {
    let res = trans_malloc_boxed_raw(cx, t);
    let box = res.val;
    let rc = GEPi(res.bcx, box, [0, abi::box_rc_field_refcnt]);
    Store(res.bcx, C_int(bcx_ccx(cx), 1), rc);
    let body = GEPi(res.bcx, box, [0, abi::box_rc_field_body]);
    ret {bcx: res.bcx, box: res.val, body: body};
}

// Type descriptor and type glue stuff

// Given a type and a field index into its corresponding type descriptor,
// returns an LLVM ValueRef of that field from the tydesc, generating the
// tydesc if necessary.
fn field_of_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool, field: int) ->
    result {
    let ti = none::<@tydesc_info>;
    let tydesc = get_tydesc(cx, t, escapes, tps_normal, ti).result;
    ret rslt(tydesc.bcx,
             GEPi(tydesc.bcx, tydesc.val, [0, field]));
}

// Given a type containing ty params, build a vector containing a ValueRef for
// each of the ty params it uses (from the current frame) and a vector of the
// indices of the ty params present in the type. This is used solely for
// constructing derived tydescs.
fn linearize_ty_params(cx: @block_ctxt, t: ty::t) ->
    {params: [uint], descs: [ValueRef]} {
    let param_vals: [ValueRef] = [];
    let param_defs: [uint] = [];
    type rr =
        {cx: @block_ctxt, mutable vals: [ValueRef], mutable defs: [uint]};
    fn linearizer(r: @rr, t: ty::t) {
        alt ty::struct(bcx_tcx(r.cx), t) {
          ty::ty_param(pid, _) {
            let seen: bool = false;
            for d: uint in r.defs { if d == pid { seen = true; } }
            if !seen {
                r.vals += [r.cx.fcx.lltyparams[pid].desc];
                r.defs += [pid];
            }
          }
          _ { }
        }
    }
    let x = @{cx: cx, mutable vals: param_vals, mutable defs: param_defs};
    let f = bind linearizer(x, _);
    ty::walk_ty(bcx_tcx(cx), f, t);
    ret {params: x.defs, descs: x.vals};
}

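// Builds a derived tydesc on the stack: copies the root descriptor into a
// fresh alloca, then patches in the pointer to the captured parameter
// descriptors, the parameter count, the size, the alignment, and the
// obj_params count.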
fn trans_stack_local_derived_tydesc(cx: @block_ctxt, llsz: ValueRef,
                                    llalign: ValueRef, llroottydesc: ValueRef,
                                    llfirstparam: ValueRef, n_params: uint,
                                    obj_params: uint) -> ValueRef {
    let llmyroottydesc = alloca(cx, bcx_ccx(cx).tydesc_type);

    // By convention, desc 0 is the root descriptor.
    let llroottydesc = Load(cx, llroottydesc);
    Store(cx, llroottydesc, llmyroottydesc);

    // Store a pointer to the rest of the descriptors.
    let ccx = bcx_ccx(cx);
    store_inbounds(cx, llfirstparam, llmyroottydesc,
                   [0, abi::tydesc_field_first_param]);
    store_inbounds(cx, C_uint(ccx, n_params), llmyroottydesc,
                   [0, abi::tydesc_field_n_params]);
    store_inbounds(cx, llsz, llmyroottydesc,
                   [0, abi::tydesc_field_size]);
    store_inbounds(cx, llalign, llmyroottydesc,
                   [0, abi::tydesc_field_align]);
    store_inbounds(cx, C_uint(ccx, obj_params), llmyroottydesc,
                   [0, abi::tydesc_field_obj_params]);
    ret llmyroottydesc;
}

// Objects and closures store their type parameters differently (in the object
// or closure itself rather than in the type descriptor).
tag ty_param_storage { tps_normal; tps_obj(uint); tps_fn(uint); }
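
// Computes (and caches, per fn_ctxt) the tydesc for a type that mentions ty
// params, deriving it from the static root tydesc plus the in-scope
// parameter descriptors.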
fn get_derived_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool,
                      storage: ty_param_storage,
                      &static_ti: option::t<@tydesc_info>) -> result {
    alt cx.fcx.derived_tydescs.find(t) {
      some(info) {
        // If the tydesc escapes in this context, the cached derived
        // tydesc also has to be one that was marked as escaping.
        if !(escapes && !info.escapes) && storage == tps_normal {
            ret rslt(cx, info.lltydesc);
        }
      }
      none. {/* fall through */ }
    }
    let is_obj_body;
    alt storage {
      tps_normal. { is_obj_body = false; }
      tps_obj(_) | tps_fn(_) { is_obj_body = true; }
    }
    bcx_ccx(cx).stats.n_derived_tydescs += 1u;
    let bcx = new_raw_block_ctxt(cx.fcx, cx.fcx.llderivedtydescs);
    let tys = linearize_ty_params(bcx, t);
    let root_ti = get_static_tydesc(bcx, t, tys.params, is_obj_body);
    static_ti = some::<@tydesc_info>(root_ti);
    lazily_emit_all_tydesc_glue(cx, static_ti);
    let root = root_ti.tydesc;
    let sz = size_of(bcx, t);
    bcx = sz.bcx;
    let align = align_of(bcx, t);
    bcx = align.bcx;
    // Store the captured type descriptors in an alloca if the caller isn't
    // promising to do so itself.
    let n_params = ty::count_ty_params(bcx_tcx(bcx), t);