@@ -355,15 +355,15 @@ pub const MCValue = union(enum) {
 
     /// Returns MCV of a limb.
     /// Caller does not own returned values.
-    fn toLimbValue(mcv: MCValue, limb_index: usize) MCValue {
+    fn toLimbValue(mcv: MCValue, limb_index: u64) MCValue {
         switch (mcv) {
             else => std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
             .register, .immediate, .register_bias, .lea_frame, .lea_nav, .lea_uav, .lea_lazy_sym => {
                 assert(limb_index == 0);
                 return mcv;
             },
             inline .register_pair, .register_triple, .register_quadruple => |regs| {
-                return .{ .register = regs[limb_index] };
+                return .{ .register = regs[@intCast(limb_index)] };
             },
             .load_frame => |frame_addr| {
                 return .{ .load_frame = .{
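This hunk switches the limb index from `usize` to `u64` and narrows it back with `@intCast` wherever it indexes the register tuple, because Zig only accepts `usize` array indices and `u64` does not implicitly coerce to `usize` when the two may differ in width. A minimal sketch of that pattern, with illustrative names that are not from the compiler source:

```zig
const std = @import("std");

/// Illustrative stand-in for indexing a fixed register tuple by a 64-bit
/// limb index. Array indexing in Zig requires a `usize`, so the `u64`
/// is narrowed with `@intCast` (safety-checked in Debug/ReleaseSafe builds).
fn pickLimbReg(regs: [4]u8, limb_index: u64) u8 {
    return regs[@intCast(limb_index)];
}

pub fn main() void {
    const regs = [4]u8{ 10, 11, 12, 13 };
    std.debug.print("limb 2 -> reg {d}\n", .{pickLimbReg(regs, 2)});
}
```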
@@ -1381,12 +1381,12 @@ const Temp = struct {
             },
             inline .register_pair, .register_triple, .register_quadruple => |regs| {
                 if (reuse)
-                    new_temp_index.tracking(cg).* = .init(.{ .register = regs[limb_index] })
+                    new_temp_index.tracking(cg).* = .init(.{ .register = regs[@intCast(limb_index)] })
                 else {
                     const new_reg =
                         try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterSets.gp);
                     new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
-                    try cg.asmInst(.@"or"(new_reg, regs[limb_index], .zero));
+                    try cg.asmInst(.@"or"(new_reg, regs[@intCast(limb_index)], .zero));
                 }
             },
             .register_bias, .register_offset => |_| {
@@ -1461,7 +1461,7 @@ const Temp = struct {
 
     /// Returns MCV of a limb.
     /// Caller does not own return values.
-    fn toLimbValue(temp: Temp, limb_index: usize, cg: *CodeGen) MCValue {
+    fn toLimbValue(temp: Temp, limb_index: u64, cg: *CodeGen) MCValue {
        return temp.tracking(cg).short.toLimbValue(limb_index);
    }
 
@@ -2550,7 +2550,7 @@ fn genCopyToMem(cg: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
         .register_offset, .load_frame, .load_nav, .load_uav, .load_lazy_sym => {
             const reg, const reg_lock = try cg.allocRegAndLock(.usize);
             defer cg.register_manager.unlockReg(reg_lock);
-            for (0..cg.getLimbCount(ty)) |limb_i| {
+            for (0..@intCast(cg.getLimbCount(ty))) |limb_i| {
                 const size = cg.getLimbSize(ty, limb_i);
                 try cg.genCopyToReg(size, reg, src_mcv.toLimbValue(limb_i), .{});
                 try cg.genCopyRegToMem(dst_mcv.toLimbValue(limb_i), reg, size);
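The remaining hunks apply the same narrowing to the `for (0..n)` range form: the loop bound must be a `usize`, so the 64-bit limb count is cast explicitly at each loop. A short hedged sketch with made-up names, not the compiler's own helpers:

```zig
const std = @import("std");

/// Illustrative loop over limbs: the bound of a `for (0..n)` range must be a
/// `usize`, so a `u64` limb count needs an explicit `@intCast` before it can
/// drive the loop; the captured index `i` is then a `usize` again.
fn sumLimbs(limbs: []const u32, limb_count: u64) u64 {
    var total: u64 = 0;
    for (0..@intCast(limb_count)) |i| {
        total += limbs[i];
    }
    return total;
}

pub fn main() void {
    const limbs = [_]u32{ 1, 2, 3 };
    std.debug.print("{d}\n", .{sumLimbs(&limbs, limbs.len)});
}
```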
@@ -2954,7 +2954,7 @@ fn airLogicBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: LogicBinOpKind) !void {
     })) {
         const lhs, const rhs = sel.ops[0..2].*;
         const dst, _ = try cg.tempReuseOrAlloc(inst, lhs, 0, ty, .{ .use_frame = false });
-        for (0..lhs.getLimbCount(cg)) |limb_i| {
+        for (0..@intCast(lhs.getLimbCount(cg))) |limb_i| {
             const lhs_limb = lhs.toLimbValue(limb_i, cg);
             const rhs_limb = rhs.toLimbValue(limb_i, cg);
             const dst_limb = dst.toLimbValue(limb_i, cg);
@@ -2970,7 +2970,7 @@ fn airLogicBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: LogicBinOpKind) !void {
         const lhs, const rhs = sel.ops[0..2].*;
         const tmp1, const tmp2 = sel.temps[0..2].*;
         const dst, _ = try cg.tempReuseOrAlloc(inst, lhs, 0, ty, .{ .use_frame = false });
-        for (0..lhs.getLimbCount(cg)) |limb_i| {
+        for (0..@intCast(lhs.getLimbCount(cg))) |limb_i| {
             const lhs_limb = try tmp1.ensureReg(cg, lhs.toLimbValue(limb_i, cg));
             const rhs_limb = try tmp2.ensureReg(cg, rhs.toLimbValue(limb_i, cg));
             const dst_limb = dst.toLimbValue(limb_i, cg);
@@ -3019,7 +3019,7 @@ fn airNot(cg: *CodeGen, inst: Air.Inst.Index) !void {
         const op = sel.ops[0];
         const tmp = sel.temps[0];
         const dst, _ = try cg.tempReuseOrAlloc(inst, op, 0, ty, .{ .use_frame = false });
-        for (0..op.getLimbCount(cg)) |limb_i| {
+        for (0..@intCast(op.getLimbCount(cg))) |limb_i| {
             const op_limb = try tmp.ensureReg(cg, op.toLimbValue(limb_i, cg));
             const dst_limb = dst.toLimbValue(limb_i, cg);
             try cg.asmInst(.nor(dst_limb.getReg().?, op_limb.getReg().?, .zero));