@@ -67,7 +67,7 @@ struct rockchip_pm_domain {
 	struct regmap **qos_regmap;
 	u32 *qos_save_regs[MAX_QOS_REGS_NUM];
 	int num_clks;
-	struct clk *clks[];
+	struct clk_bulk_data *clks;
 };
 
 struct rockchip_pmu {
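
Note: the flexible array of bare struct clk pointers becomes one devm-allocated array of
struct clk_bulk_data entries. As a reference sketch, the layout the clk_bulk_*() helpers
work on is roughly the following (paraphrased from include/linux/clk.h, comments added here):

	struct clk_bulk_data {
		const char *id;   /* optional "clock-names" id; left NULL by this driver */
		struct clk *clk;  /* the handle the clk_bulk_*() helpers operate on */
	};
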
@@ -274,13 +274,18 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
 
 static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
 {
-	int i;
+	struct rockchip_pmu *pmu = pd->pmu;
+	int ret;
 
-	mutex_lock(&pd->pmu->mutex);
+	mutex_lock(&pmu->mutex);
 
 	if (rockchip_pmu_domain_is_on(pd) != power_on) {
-		for (i = 0; i < pd->num_clks; i++)
-			clk_enable(pd->clks[i]);
+		ret = clk_bulk_enable(pd->num_clks, pd->clks);
+		if (ret < 0) {
+			dev_err(pmu->dev, "failed to enable clocks\n");
+			mutex_unlock(&pmu->mutex);
+			return ret;
+		}
 
 		if (!power_on) {
 			rockchip_pmu_save_qos(pd);
@@ -298,11 +303,10 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
 			rockchip_pmu_restore_qos(pd);
 		}
 
-		for (i = pd->num_clks - 1; i >= 0; i--)
-			clk_disable(pd->clks[i]);
+		clk_bulk_disable(pd->num_clks, pd->clks);
 	}
 
-	mutex_unlock(&pd->pmu->mutex);
+	mutex_unlock(&pmu->mutex);
 
 	return 0;
 }
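
The per-clock enable/disable loops above collapse into single bulk calls. A minimal,
hypothetical usage sketch (only the clk_bulk_*() helpers are real kernel API; example_switch()
and its arguments are invented for illustration):

	#include <linux/clk.h>

	static int example_switch(struct clk_bulk_data *clks, int num_clks)
	{
		int ret;

		/* Enables clks[0..num_clks-1] in order; on failure the helper
		 * disables whatever it had already enabled before returning. */
		ret = clk_bulk_enable(num_clks, clks);
		if (ret)
			return ret;

		/* ... poke the power-domain registers here ... */

		/* One call disables the whole array again, in reverse order. */
		clk_bulk_disable(num_clks, clks);
		return 0;
	}
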
@@ -364,8 +368,6 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 	const struct rockchip_domain_info *pd_info;
 	struct rockchip_pm_domain *pd;
 	struct device_node *qos_node;
-	struct clk *clk;
-	int clk_cnt;
 	int i, j;
 	u32 id;
 	int error;
@@ -391,41 +393,41 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 		return -EINVAL;
 	}
 
-	clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
-	pd = devm_kzalloc(pmu->dev,
-			  sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
-			  GFP_KERNEL);
+	pd = devm_kzalloc(pmu->dev, sizeof(*pd), GFP_KERNEL);
 	if (!pd)
 		return -ENOMEM;
 
 	pd->info = pd_info;
 	pd->pmu = pmu;
 
-	for (i = 0; i < clk_cnt; i++) {
-		clk = of_clk_get(node, i);
-		if (IS_ERR(clk)) {
-			error = PTR_ERR(clk);
+	pd->num_clks = of_count_phandle_with_args(node, "clocks",
+						  "#clock-cells");
+	if (pd->num_clks > 0) {
+		pd->clks = devm_kcalloc(pmu->dev, pd->num_clks,
+					sizeof(*pd->clks), GFP_KERNEL);
+		if (!pd->clks)
+			return -ENOMEM;
+	} else {
+		dev_dbg(pmu->dev, "%s: doesn't have clocks: %d\n",
+			node->name, pd->num_clks);
+		pd->num_clks = 0;
+	}
+
+	for (i = 0; i < pd->num_clks; i++) {
+		pd->clks[i].clk = of_clk_get(node, i);
+		if (IS_ERR(pd->clks[i].clk)) {
+			error = PTR_ERR(pd->clks[i].clk);
 			dev_err(pmu->dev,
 				"%s: failed to get clk at index %d: %d\n",
 				node->name, i, error);
-			goto err_out;
-		}
-
-		error = clk_prepare(clk);
-		if (error) {
-			dev_err(pmu->dev,
-				"%s: failed to prepare clk %pC (index %d): %d\n",
-				node->name, clk, i, error);
-			clk_put(clk);
-			goto err_out;
+			return error;
 		}
-
-		pd->clks[pd->num_clks++] = clk;
-
-		dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
-			clk, node->name);
 	}
 
+	error = clk_bulk_prepare(pd->num_clks, pd->clks);
+	if (error)
+		goto err_put_clocks;
+
 	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
 						 NULL);
 
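
The acquisition path now counts the "clocks" phandles, allocates the clk_bulk_data array,
fills each .clk slot with of_clk_get(), and prepares everything with one clk_bulk_prepare()
call, unwinding via clk_bulk_unprepare()/clk_bulk_put(). A condensed, hypothetical sketch of
that ordering (example_get_clocks() and its parameters are invented for illustration; the
clk/of/devm calls are the real APIs):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/slab.h>

	static int example_get_clocks(struct device *dev, struct device_node *node,
				      struct clk_bulk_data **clks_out, int *num_out)
	{
		struct clk_bulk_data *clks;
		int i, num, ret;

		num = of_count_phandle_with_args(node, "clocks", "#clock-cells");
		if (num <= 0) {			/* a domain without clocks is fine */
			*clks_out = NULL;
			*num_out = 0;
			return 0;
		}

		clks = devm_kcalloc(dev, num, sizeof(*clks), GFP_KERNEL);
		if (!clks)
			return -ENOMEM;

		for (i = 0; i < num; i++) {
			clks[i].clk = of_clk_get(node, i);
			if (IS_ERR(clks[i].clk)) {
				ret = PTR_ERR(clks[i].clk);
				clks[i].clk = NULL;	/* clk_put() skips NULL */
				goto err_put;
			}
		}

		/* Prepare the whole array at once; on failure the helper
		 * unprepares the entries it had already prepared. */
		ret = clk_bulk_prepare(num, clks);
		if (ret)
			goto err_put;

		*clks_out = clks;
		*num_out = num;
		return 0;

	err_put:
		clk_bulk_put(num, clks);
		return ret;
	}
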
@@ -435,7 +437,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 				  GFP_KERNEL);
 	if (!pd->qos_regmap) {
 		error = -ENOMEM;
-		goto err_out;
+		goto err_unprepare_clocks;
 	}
 
 	for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
@@ -445,21 +447,21 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 						    GFP_KERNEL);
 		if (!pd->qos_save_regs[j]) {
 			error = -ENOMEM;
-			goto err_out;
+			goto err_unprepare_clocks;
 		}
 	}
 
 	for (j = 0; j < pd->num_qos; j++) {
 		qos_node = of_parse_phandle(node, "pm_qos", j);
 		if (!qos_node) {
 			error = -ENODEV;
-			goto err_out;
+			goto err_unprepare_clocks;
 		}
 		pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
 		if (IS_ERR(pd->qos_regmap[j])) {
 			error = -ENODEV;
 			of_node_put(qos_node);
-			goto err_out;
+			goto err_unprepare_clocks;
 		}
 		of_node_put(qos_node);
 	}
@@ -470,7 +472,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 		dev_err(pmu->dev,
 			"failed to power on domain '%s': %d\n",
 			node->name, error);
-		goto err_out;
+		goto err_unprepare_clocks;
 	}
 
 	pd->genpd.name = node->name;
@@ -486,17 +488,16 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 	pmu->genpd_data.domains[id] = &pd->genpd;
 	return 0;
 
-err_out:
-	while (--i >= 0) {
-		clk_unprepare(pd->clks[i]);
-		clk_put(pd->clks[i]);
-	}
+err_unprepare_clocks:
+	clk_bulk_unprepare(pd->num_clks, pd->clks);
+err_put_clocks:
+	clk_bulk_put(pd->num_clks, pd->clks);
 	return error;
 }
 
 static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
 {
-	int i, ret;
+	int ret;
 
 	/*
 	 * We're in the error cleanup already, so we only complain,
@@ -507,10 +508,8 @@ static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
 		dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
 			pd->genpd.name, ret);
 
-	for (i = 0; i < pd->num_clks; i++) {
-		clk_unprepare(pd->clks[i]);
-		clk_put(pd->clks[i]);
-	}
+	clk_bulk_unprepare(pd->num_clks, pd->clks);
+	clk_bulk_put(pd->num_clks, pd->clks);
 
 	/* protect the zeroing of pm->num_clks */
 	mutex_lock(&pd->pmu->mutex);