@@ -296,6 +296,73 @@ def gpu_pool(self, tmpdir, monkeypatch):
     @pytest.mark.parametrize(
         'cluster_manager,user_spark_opts,expected_output', [
+            # dynamic resource allocation enabled
+            (
+                'kubernetes',
+                {
+                    'spark.dynamicAllocation.enabled': 'true',
+                    'spark.executor.cores': '4',
+                    'spark.cores.max': '128',
+                },
+                {
+                    'spark.executor.memory': '4g',
+                    'spark.executor.cores': '4',
+                    'spark.executor.instances': '2',
+                    'spark.kubernetes.executor.limit.cores': '4',
+                    'spark.kubernetes.allocation.batch.size': '2',
+                    'spark.scheduler.maxRegisteredResourcesWaitingTime': '15min',
+                },
+            ),
+            (
+                'kubernetes',
+                {
+                    'spark.dynamicAllocation.enabled': 'true',
+                    'spark.dynamicAllocation.maxExecutors': '512',
+                    'spark.dynamicAllocation.minExecutors': '128',
+                    'spark.dynamicAllocation.initialExecutors': '128',
+                    'spark.executor.cores': '4',
+                },
+                {
+                    'spark.executor.memory': '4g',
+                    'spark.executor.cores': '4',
+                    'spark.executor.instances': '2',
+                    'spark.kubernetes.executor.limit.cores': '4',
+                    'spark.kubernetes.allocation.batch.size': '2',
+                    'spark.scheduler.maxRegisteredResourcesWaitingTime': '15min',
+                },
+            ),
+            # dynamic resource allocation disabled with instances specified
+            (
+                'kubernetes',
+                {
+                    'spark.dynamicAllocation.enabled': 'false',
+                    'spark.executor.instances': '600',
+                },
+                {
+                    'spark.executor.memory': '4g',
+                    'spark.executor.cores': '2',
+                    'spark.executor.instances': '600',
+                    'spark.kubernetes.executor.limit.cores': '2',
+                    'spark.kubernetes.allocation.batch.size': '512',
+                    'spark.scheduler.maxRegisteredResourcesWaitingTime': '35min',
+                },
+            ),
+            # dynamic resource allocation disabled with instances not specified
+            (
+                'kubernetes',
+                {
+                    'spark.executor.cores': '4',
+                    'spark.cores.max': '128',
+                },
+                {
+                    'spark.executor.memory': '4g',
+                    'spark.executor.cores': '4',
+                    'spark.executor.instances': '32',
+                    'spark.kubernetes.executor.limit.cores': '4',
+                    'spark.kubernetes.allocation.batch.size': '32',
+                    'spark.scheduler.maxRegisteredResourcesWaitingTime': '16min',
+                },
+            ),
             # use default k8s settings
             (
                 'kubernetes',
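
Taken together, the four new cases above pin down a sizing rule for the Kubernetes executor settings. As a reading aid, here is a minimal Python sketch, inferred purely from the expected outputs in the hunk, that reproduces all four cases; the function name `infer_k8s_executor_settings` and the constants are hypothetical, not the library's actual implementation:

```python
def infer_k8s_executor_settings(user_spark_opts):
    """Hypothetical reconstruction of the sizing rules the cases above exercise."""
    DEFAULT_EXECUTOR_CORES = 2      # matches the 'spark.executor.cores': '2' expectation
    DEFAULT_EXECUTOR_INSTANCES = 2  # static seed when dynamic allocation is on (assumed)
    MAX_ALLOCATION_BATCH_SIZE = 512

    cores = int(user_spark_opts.get('spark.executor.cores', DEFAULT_EXECUTOR_CORES))
    if user_spark_opts.get('spark.dynamicAllocation.enabled') == 'true':
        # Spark scales executors itself, so only a small seed is requested up front.
        instances = DEFAULT_EXECUTOR_INSTANCES
    elif 'spark.executor.instances' in user_spark_opts:
        instances = int(user_spark_opts['spark.executor.instances'])
    else:
        # Back the instance count out of the total core budget: 128 // 4 = 32.
        instances = int(user_spark_opts['spark.cores.max']) // cores

    # Allocation batches appear to be capped at 512 executors per request.
    batch_size = min(instances, MAX_ALLOCATION_BATCH_SIZE)
    # 15 minutes base plus one minute per 30 executors reproduces every case:
    # 2 -> 15min, 32 -> 16min, 600 -> 35min. The real constants may differ.
    waiting_time = 15 + instances // 30
    return {
        'spark.executor.memory': '4g',  # constant default in all expected outputs
        'spark.executor.cores': str(cores),
        'spark.executor.instances': str(instances),
        'spark.kubernetes.executor.limit.cores': str(cores),
        'spark.kubernetes.allocation.batch.size': str(batch_size),
        'spark.scheduler.maxRegisteredResourcesWaitingTime': f'{waiting_time}min',
    }
```

In this reading, the executor core count doubles as the per-pod CPU limit, which is why `spark.kubernetes.executor.limit.cores` always mirrors `spark.executor.cores` in the expectations.
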
@@ -498,6 +565,43 @@ def test_append_event_log_conf(
             ({'spark.executor.instances': '10', 'spark.executor.cores': '3'}, '60'),
             # user defined
             ({'spark.sql.shuffle.partitions': '300'}, '300'),
+            # dynamic resource allocation enabled, both maxExecutors and max cores defined
+            (
+                {
+                    'spark.dynamicAllocation.enabled': 'true',
+                    'spark.dynamicAllocation.maxExecutors': '128',
+                    'spark.executor.cores': '3',
+                    'spark.cores.max': '10',
+                },
+                '768',  # max(2 * (max cores), 2 * (maxExecutors * executor cores))
+            ),
+            # dynamic resource allocation enabled, maxExecutors not defined, max cores defined
+            (
+                {
+                    'spark.dynamicAllocation.enabled': 'true',
+                    'spark.executor.cores': '3',
+                    'spark.cores.max': '10',
+                },
+                '20',  # 2 * max cores
+            ),
+            # dynamic resource allocation enabled, maxExecutors not defined, max cores not defined
+            (
+                {
+                    'spark.dynamicAllocation.enabled': 'true',
+                    'spark.executor.cores': '3',
+                },
+                '128',  # DEFAULT_SQL_SHUFFLE_PARTITIONS
+            ),
+            # dynamic resource allocation enabled, maxExecutors infinity
+            (
+                {
+                    'spark.dynamicAllocation.enabled': 'true',
+                    'spark.dynamicAllocation.maxExecutors': 'infinity',
+                    'spark.executor.cores': '3',
+                    'spark.cores.max': '10',
+                },
+                '20',  # 2 * max cores
+            ),
         ],
     )
     def test_append_sql_shuffle_partitions_conf(
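
The new shuffle-partition cases above, together with the existing `'60'` and `'300'` cases, determine a single rule. The sketch below is a hypothetical reconstruction inferred from those expectations (only `DEFAULT_SQL_SHUFFLE_PARTITIONS = 128` is confirmed by the inline comment; the function name and structure are assumptions, not the library's actual code):

```python
DEFAULT_SQL_SHUFFLE_PARTITIONS = 128  # named in the '128' case's comment above

def infer_sql_shuffle_partitions(user_spark_opts):
    """Hypothetical reconstruction of the partition rule the cases above test."""
    if 'spark.sql.shuffle.partitions' in user_spark_opts:
        # A user-defined value always wins: the '300' case.
        return user_spark_opts['spark.sql.shuffle.partitions']

    cores = int(user_spark_opts.get('spark.executor.cores', 2))
    candidates = []
    if user_spark_opts.get('spark.dynamicAllocation.enabled') == 'true':
        max_executors = user_spark_opts.get('spark.dynamicAllocation.maxExecutors')
        # 'infinity' puts no bound on executor count, so it cannot size the
        # shuffle; only a finite maxExecutors contributes a candidate.
        if max_executors and max_executors != 'infinity':
            candidates.append(2 * int(max_executors) * cores)
    elif 'spark.executor.instances' in user_spark_opts:
        # The pre-existing case: 2 * 10 instances * 3 cores = 60.
        candidates.append(2 * int(user_spark_opts['spark.executor.instances']) * cores)
    if 'spark.cores.max' in user_spark_opts:
        candidates.append(2 * int(user_spark_opts['spark.cores.max']))
    # Largest estimate wins (max(768, 20) = 768); with no usable signal,
    # fall back to the library default of 128.
    return str(max(candidates, default=DEFAULT_SQL_SHUFFLE_PARTITIONS))
```

Under this reading, shuffle partitions are provisioned at roughly twice the peak core count of the job, whichever of the bounds (max executors times executor cores, or `spark.cores.max`) turns out to be larger.
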