
(-)linux-2.6.4-bk-acpi/drivers/acpi/processor.c (-108 / +447 lines)
Lines 298-319 acpi_processor_power_activate (
298
	struct acpi_processor	*pr,
298
	struct acpi_processor	*pr,
299
	int			state)
299
	int			state)
300
{
300
{
301
	int old_state;
301
	if (!pr)
302
	if (!pr)
302
		return;
303
		return;
303
304
304
	pr->power.states[pr->power.state].promotion.count = 0;
305
	old_state = pr->power.state;
305
	pr->power.states[pr->power.state].demotion.count = 0;
306
307
	pr->power.states[old_state].promotion.count = 0;
308
	pr->power.states[old_state].demotion.count = 0;
306
309
307
	/* Cleanup from old state. */
310
	/* Cleanup from old state. */
308
	switch (pr->power.state) {
311
	switch (pr->power.states[old_state].type) {
309
	case ACPI_STATE_C3:
312
	case ACPI_STATE_C3:
310
		/* Disable bus master reload */
313
		/* Disable bus master reload if the new state is not C3 */
311
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
314
315
		if (pr->power.states[state].type != ACPI_STATE_C3)
316
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
312
		break;
317
		break;
313
	}
318
	}
314
319
315
	/* Prepare to use new state. */
320
	/* Prepare to use new state. */
316
	switch (state) {
321
	switch (pr->power.states[state].type) {
317
	case ACPI_STATE_C3:
322
	case ACPI_STATE_C3:
318
		/* Enable bus master reload */
323
		/* Enable bus master reload */
319
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
324
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
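For reference, the whole function reads roughly as follows after this hunk. The "static void" signature and the trailing assignment/return are assumed from the surrounding file and are not part of the hunk above; only the body shown in the diff is taken verbatim:

static void
acpi_processor_power_activate (
	struct acpi_processor	*pr,
	int			state)
{
	int old_state;

	if (!pr)
		return;

	old_state = pr->power.state;

	/* Reset promotion/demotion bookkeeping for the state being left. */
	pr->power.states[old_state].promotion.count = 0;
	pr->power.states[old_state].demotion.count = 0;

	/* Cleanup from old state: only stop bus master reload when actually
	 * leaving a C3-type state for a non-C3-type state. */
	switch (pr->power.states[old_state].type) {
	case ACPI_STATE_C3:
		if (pr->power.states[state].type != ACPI_STATE_C3)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
		break;
	}

	/* Prepare to use new state. */
	switch (pr->power.states[state].type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
		break;
	}

	pr->power.state = state;	/* assumed: unchanged tail of the function */
	return;
}

The important change is that C-states are now addressed by their position in pr->power.states[] and classified by the new .type field, instead of assuming array index == C-type as the old code did.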
Lines 401-407 acpi_processor_idle (void)
401
	 * ------
406
	 * ------
402
	 * Invoke the current Cx state to put the processor to sleep.
407
	 * Invoke the current Cx state to put the processor to sleep.
403
	 */
408
	 */
404
	switch (pr->power.state) {
409
	switch (cx->type) {
405
410
406
	case ACPI_STATE_C1:
411
	case ACPI_STATE_C1:
407
		/* Invoke C1. */
412
		/* Invoke C1. */
Lines 418-424 acpi_processor_idle (void)
418
		/* Get start time (ticks) */
423
		/* Get start time (ticks) */
419
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
424
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
420
		/* Invoke C2 */
425
		/* Invoke C2 */
421
		inb(pr->power.states[ACPI_STATE_C2].address);
426
		inb(cx->address);
422
		/* Dummy op - must do something useless after P_LVL2 read */
427
		/* Dummy op - must do something useless after P_LVL2 read */
423
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
428
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
424
		/* Get end time (ticks) */
429
		/* Get end time (ticks) */
Lines 435-441 acpi_processor_idle (void)
435
		/* Get start time (ticks) */
440
		/* Get start time (ticks) */
436
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
441
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
437
		/* Invoke C3 */
442
		/* Invoke C3 */
438
		inb(pr->power.states[ACPI_STATE_C3].address);
443
		inb(cx->address);
439
		/* Dummy op - must do something useless after P_LVL3 read */
444
		/* Dummy op - must do something useless after P_LVL3 read */
440
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
445
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
441
		/* Get end time (ticks) */
446
		/* Get end time (ticks) */
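The C2 and C3 paths bracket the inb(cx->address) that enters the state with two reads of the ACPI PM timer, so the driver can measure the time actually spent asleep in timer ticks. The PM timer is a free-running counter at 3.579545 MHz, normally 24 bits wide (32 on some chipsets), so the subtraction has to tolerate wrap-around. A minimal illustrative helper, assuming the 24-bit case (pm_ticks_elapsed is a made-up name, not something this patch adds):

	static inline u32
	pm_ticks_elapsed (u32 t1, u32 t2)
	{
		if (t2 >= t1)
			return (t2 - t1);
		/* The 24-bit timer wrapped between the two reads. */
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	}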
Lines 511-523 end:
511
	return;
516
	return;
512
}
517
}
513
518
514
515
static int
519
static int
516
acpi_processor_set_power_policy (
520
acpi_processor_set_power_policy (
517
	struct acpi_processor	*pr)
521
	struct acpi_processor	*pr)
518
{
522
{
523
	int i;
524
519
	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");
525
	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");
520
526
527
	/* XXX rewrite documentation. */
528
521
	/*
529
	/*
522
	 * This function sets the default Cx state policy (OS idle handler).
530
	 * This function sets the default Cx state policy (OS idle handler).
523
	 * Our scheme is to promote quickly to C2 but more conservatively
531
	 * Our scheme is to promote quickly to C2 but more conservatively
Lines 537-614 acpi_processor_set_power_policy (
537
	pr->power.state = ACPI_STATE_C1;
545
	pr->power.state = ACPI_STATE_C1;
538
	pr->power.default_state = ACPI_STATE_C1;
546
	pr->power.default_state = ACPI_STATE_C1;
539
547
540
	/*
548
	for (i = 0; i <= pr->power.count; i++) {
541
	 * C1/C2
549
		switch (pr->power.states[i].type) {
542
	 * -----
550
		case ACPI_STATE_C0:
543
	 * Set the default C1 promotion and C2 demotion policies, where we
551
			break;
544
	 * promote from C1 to C2 after several (10) successive C1 transitions,
552
		case ACPI_STATE_C1:
545
	 * as we cannot (currently) measure the time spent in C1. Demote from
553
			break;
546
	 * C2 to C1 anytime we experience a 'short' (time spent in C2 is less
554
		case ACPI_STATE_C2:
547
	 * than the C2 transition latency).  Note the simplifying assumption
555
		/*
548
	 * that the 'cost' of a transition is amortized when we sleep for at
556
		 * C1/C2
549
	 * least as long as the transition's latency (thus the total transition
557
		 * -----
550
	 * time is two times the latency).
558
		 * Set the default C1 promotion and C2 demotion policies, where we
551
	 *
559
		 * promote from C1 to C2 after several (10) successive C1 transitions,
552
	 * TBD: Measure C1 sleep times by instrumenting the core IRQ handler.
560
		 * as we cannot (currently) measure the time spent in C1. Demote from
553
	 * TBD: Demote to default C-State after long periods of activity.
561
		 * C2 to C1 anytime we experience a 'short' (time spent in C2 is less
554
	 * TBD: Investigate policy's use of CPU utilization -vs- sleep duration.
562
		 * than the C2 transition latency).  Note the simplifying assumption
555
	 */
563
		 * that the 'cost' of a transition is amortized when we sleep for at
556
	if (pr->power.states[ACPI_STATE_C2].valid) {
564
		 * least as long as the transition's latency (thus the total transition
557
		pr->power.states[ACPI_STATE_C1].promotion.threshold.count = 10;
565
		 * time is two times the latency).
558
		pr->power.states[ACPI_STATE_C1].promotion.threshold.ticks =
566
		 *
559
			pr->power.states[ACPI_STATE_C2].latency_ticks;
567
		 * TBD: Measure C1 sleep times by instrumenting the core IRQ handler.
560
		pr->power.states[ACPI_STATE_C1].promotion.state = ACPI_STATE_C2;
568
		 * TBD: Demote to default C-State after long periods of activity.
569
		 * TBD: Investigate policy's use of CPU utilization -vs- sleep duration.
570
		 * XXX update comment.
571
		 */
572
		pr->power.states[i-1].promotion.threshold.count = 10;
573
		pr->power.states[i-1].promotion.threshold.ticks =
574
			pr->power.states[i].latency_ticks;
575
		pr->power.states[i-1].promotion.state = i;
576
577
		pr->power.states[i].demotion.threshold.count = 1;
578
		pr->power.states[i].demotion.threshold.ticks =
579
			pr->power.states[i].latency_ticks;
580
		pr->power.states[i].demotion.state = i-1;
581
			break;
582
  
583
		case ACPI_STATE_C3:
584
		/*
585
		 * C2/C3
586
		 * -----
587
		 * Set default C2 promotion and C3 demotion policies, where we promote
588
		 * from C2 to C3 after several (4) cycles of no bus mastering activity
589
		 * while maintaining sleep time criteria.  Demote immediately on a
590
		 * short or whenever bus mastering activity occurs.
591
		 *
592
		 * XXX update comment.
593
		 */
594
		pr->power.states[i-1].promotion.threshold.count = 4;
595
		pr->power.states[i-1].promotion.threshold.ticks =
596
			pr->power.states[i].latency_ticks;
597
		pr->power.states[i-1].promotion.threshold.bm = 0x0F;
598
		pr->power.states[i-1].promotion.state = i;
599
600
		pr->power.states[i].demotion.threshold.count = 1;
601
		pr->power.states[i].demotion.threshold.ticks =
602
			pr->power.states[i].latency_ticks;
603
		pr->power.states[i].demotion.threshold.bm = 0x0F;
604
		pr->power.states[i].demotion.state = i-1;
605
			break;
606
		default:
607
			return_VALUE(-EINVAL);
608
		}
609
	}
610
611
	return_VALUE(0);
612
}
613
614
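The loop above builds the default policy generically from neighbouring entries of pr->power.states[], instead of the fixed C2/C3 slots used before. As an illustration only, on a machine exposing C1, C2 and C3 at indices 1..3 it produces the same chained policy the old hard-coded version did:

	/*
	 * C1.promotion: count 10, ticks = C2 latency_ticks            -> C2
	 * C2.demotion:  count 1,  ticks = C2 latency_ticks            -> C1
	 * C2.promotion: count 4,  ticks = C3 latency_ticks, bm = 0x0F -> C3
	 * C3.demotion:  count 1,  ticks = C3 latency_ticks, bm = 0x0F -> C2
	 */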
615
static int
616
acpi_processor_get_power_info_cst (
617
	struct acpi_processor	*pr)
618
{
619
	acpi_status		status = 0;
620
	acpi_integer		count;
621
	int			i;
622
	struct acpi_buffer	buffer = {ACPI_ALLOCATE_BUFFER, NULL};
623
	union acpi_object	*cst;
624
625
	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");
561
626
562
		pr->power.states[ACPI_STATE_C2].demotion.threshold.count = 1;
627
	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
563
		pr->power.states[ACPI_STATE_C2].demotion.threshold.ticks =
628
	if (ACPI_FAILURE(status)) {
564
			pr->power.states[ACPI_STATE_C2].latency_ticks;
629
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
565
		pr->power.states[ACPI_STATE_C2].demotion.state = ACPI_STATE_C1;
630
		return_VALUE(-ENODEV);
566
	}
631
	}
567
632
568
	/*
633
	cst = (union acpi_object *) buffer.pointer;
569
	 * C2/C3
634
570
	 * -----
635
	/* There must be at least 2 elements */
571
	 * Set default C2 promotion and C3 demotion policies, where we promote
636
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
572
	 * from C2 to C3 after several (4) cycles of no bus mastering activity
637
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "not enough elements in _CST\n"));
573
	 * while maintaining sleep time criteria.  Demote immediately on a
638
		status = -EFAULT;
574
	 * short or whenever bus mastering activity occurs.
639
		goto end;
575
	 */
576
	if ((pr->power.states[ACPI_STATE_C2].valid) &&
577
		(pr->power.states[ACPI_STATE_C3].valid)) {
578
		pr->power.states[ACPI_STATE_C2].promotion.threshold.count = 4;
579
		pr->power.states[ACPI_STATE_C2].promotion.threshold.ticks =
580
			pr->power.states[ACPI_STATE_C3].latency_ticks;
581
		pr->power.states[ACPI_STATE_C2].promotion.threshold.bm = 0x0F;
582
		pr->power.states[ACPI_STATE_C2].promotion.state = ACPI_STATE_C3;
583
584
		pr->power.states[ACPI_STATE_C3].demotion.threshold.count = 1;
585
		pr->power.states[ACPI_STATE_C3].demotion.threshold.ticks =
586
			pr->power.states[ACPI_STATE_C3].latency_ticks;
587
		pr->power.states[ACPI_STATE_C3].demotion.threshold.bm = 0x0F;
588
		pr->power.states[ACPI_STATE_C3].demotion.state = ACPI_STATE_C2;
589
	}
640
	}
590
641
591
	return_VALUE(0);
642
	/* First element is of type integer. */
592
}
643
	if (cst->package.elements[0].type != ACPI_TYPE_INTEGER) {
644
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "first element of _CST is not an integer\n"));
645
		status = -EFAULT;
646
		goto end;
647
	}
593
648
649
	count = cst->package.elements[0].integer.value;
594
650
595
int
651
	/* Validate number of power states. */
596
acpi_processor_get_power_info (
652
	if (count < 1 || count != cst->package.count - 1) {
653
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "count given by _CST is not valid\n"));
654
		status = -EFAULT;
655
		goto end;
656
	}
657
658
	/* We support up to ACPI_PROCESSOR_MAX_POWER. */
659
	if (count > ACPI_PROCESSOR_MAX_POWER) {
660
		printk(KERN_WARNING "Limiting number of power states to max (%d)\n", ACPI_PROCESSOR_MAX_POWER);
661
		printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
662
		count = ACPI_PROCESSOR_MAX_POWER;
663
	}
664
665
	/* Tell driver that at least _CST is supported. */
666
	pr->power.has_cst = 1;
667
668
	/* Get all power states */
669
670
	/* The first state is always C0, which is already filled. */
671
	pr->power.count = 1;
672
	for (i = 1; i <= count; i++) {
673
		union acpi_object *element;
674
		union acpi_object *obj;
675
		struct acpi_power_register *reg;
676
		struct acpi_processor_cx cx;
677
678
		memset(&cx, 0, sizeof(cx));
679
680
		element = (union acpi_object *) &(cst->package.elements[i]);
681
		if (element->type != ACPI_TYPE_PACKAGE)
682
			continue;
683
684
		if (element->package.count != 4)
685
			continue;
686
687
		obj = (union acpi_object *) &(element->package.elements[0]);
688
689
		if (obj->type != ACPI_TYPE_BUFFER)
690
			continue;
691
692
		reg = (struct acpi_power_register *) obj->buffer.pointer;
693
694
		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
695
			(reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
696
			continue;
697
698
		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
699
			0 : reg->address;
700
701
		/* XXX need to rewrite this. */
702
		obj = (union acpi_object *) &(element->package.elements[1]);
703
		if (obj->type != ACPI_TYPE_INTEGER)
704
			continue;
705
706
		cx.type = obj->integer.value;
707
708
		obj = (union acpi_object *) &(element->package.elements[2]);
709
		if (obj->type != ACPI_TYPE_INTEGER)
710
			continue;
711
712
		cx.latency = obj->integer.value;
713
714
		obj = (union acpi_object *) &(element->package.elements[3]);
715
		if (obj->type != ACPI_TYPE_INTEGER)
716
			continue;
717
718
		cx.power = obj->integer.value;
719
720
		/* Validate this power state */
721
		switch (cx.type) {
722
		case ACPI_STATE_C1:
723
			/*
724
			 * Already filled by our caller.
725
			 * How to handle if there is more than one power state
726
			 * of this type?  Well, I doubt that exist at all.
727
			 */
728
			continue;
729
		case ACPI_STATE_C2:
730
			if (cx.address == 0 || cx.latency > ACPI_PROCESSOR_MAX_C2_LATENCY)
731
				continue;
732
			/*
733
			 * Only support C2 on UP systems (see TBD above).
734
			 */
735
			if (errata.smp) {
736
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
737
					"C2 not supported in SMP mode\n"));
738
				continue;
739
			}
740
			break;
741
742
		case ACPI_STATE_C3:
743
			if (cx.address == 0 || cx.latency > 1000)
744
				continue;
745
			/*
746
			 * Only support C3 when bus mastering arbitration
747
			 * control is present (able to disable bus mastering
748
			 * to maintain cache coherency while in C3).
749
			 */
750
			if (!pr->flags.bm_control) {
751
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
752
					"C3 support requires bus mastering control\n"));
753
				continue;
754
			}
755
			/*
756
			 * Only support C3 on UP systems, as bm_control is
757
			 * only viable on a UP system and flushing caches
758
			 * (e.g. WBINVD) is simply too costly (at this time).
759
			 */
760
			if (errata.smp) {
761
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
762
					"C3 not supported in SMP mode\n"));
763
				continue;
764
			}
765
			/*
766
			 * PIIX4 Erratum #18: We don't support C3 when
767
			 * Type-F (fast) DMA transfers are used by any
768
			 * ISA device to avoid livelock.
769
			 * Note that we could disable Type-F DMA (as
770
			 * recommended by the erratum), but this is known
771
			 * to disrupt certain ISA devices thus we take
772
			 * the conservative approach.
773
			 */
774
			if (errata.piix4.fdma) {
775
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
776
					"C3 not supported on PIIX4 with Type-F DMA\n"));
777
				continue;
778
			}
779
			/*
780
			 * Otherwise we've met all of our C3 requirements.  
781
			 * Normalize the C2 latency to expedite policy.
782
			 * Enable checking of bus mastering status
783
			 * (bm_check) so we can use this in our C3 policy.
784
			 */
785
			pr->flags.bm_check = 1;
786
			break;
787
		default:
788
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
789
					"Unknow Cx state.  Contact your bios manufacturer.\n"));
790
			continue;
791
		}
792
793
		/* All check done.  Register this power state. */
794
		cx.valid = 1;
795
		cx.latency_ticks = US_TO_PM_TIMER_TICKS(cx.latency);
796
		(pr->power.count)++;
797
		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
798
	}
799
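The latency_ticks assignment above converts the microsecond latency reported by _CST into PM-timer ticks so it can later be compared directly against measured sleep durations. With the PM timer running at 3.579545 MHz the conversion is roughly as below; EXAMPLE_US_TO_TICKS is only an illustration, not the macro the driver actually uses:

	/* ticks ~= microseconds * 3.579545 */
	#define EXAMPLE_US_TO_TICKS(us)	(((us) * 3579545UL) / 1000000)	/* 100 us -> ~357 ticks */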
800
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", pr->power.count));
801
802
	/* Validate number of power states discovered */
803
	if (pr->power.count < 2)
804
		status = -ENODEV;
805
806
end:
807
	acpi_os_free(buffer.pointer);
808
809
	return_VALUE(status);
810
}
811
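For review purposes, the shape of the ACPI 2.0 _CST object that acpi_processor_get_power_info_cst() walks is, in outline:

	/*
	 * _CST returns:
	 *
	 *	Package {
	 *		Count,						// Integer
	 *		Package { Register, Type, Latency, Power },	// one per C-state
	 *		...
	 *	}
	 *
	 * Register is a Buffer carrying a generic address descriptor (overlaid
	 * above with the new struct acpi_power_register from processor.h),
	 * Type is 1/2/3 for C1/C2/C3, Latency is the worst-case entry/exit
	 * latency in microseconds, and Power is the average power drawn in
	 * that state, in milliwatts.
	 */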
812
static int
813
acpi_processor_cst_has_changed (
597
	struct acpi_processor	*pr)
814
	struct acpi_processor	*pr)
598
{
815
{
599
	int			result = 0;
816
	int			result = 0;
600
817
601
	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
818
	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
602
819
603
	if (!pr)
820
	if (!pr)
604
		return_VALUE(-EINVAL);
821
		return_VALUE(-EINVAL);
605
822
606
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
823
	if (errata.smp) {
607
		"lvl2[0x%08x] lvl3[0x%08x]\n",
824
		return_VALUE(-EINVAL);
608
		pr->power.states[ACPI_STATE_C2].address,
825
	}
609
		pr->power.states[ACPI_STATE_C3].address));
610
826
611
	/* TBD: Support ACPI 2.0 objects */
827
	if (!pr->power.has_cst) {
828
		return_VALUE(-EINVAL);
829
	}
830
831
	/* Install the default idle loop */
832
833
	pm_idle = pm_idle_save;
834
	pm_idle_save = NULL;
612
835
613
	/*
836
	/*
614
	 * C0
837
	 * C0
Lines 616-621 acpi_processor_get_power_info (
616
	 * This state exists only as filler in our array.
839
	 * This state exists only as filler in our array.
617
	 */
840
	 */
618
	pr->power.states[ACPI_STATE_C0].valid = 1;
841
	pr->power.states[ACPI_STATE_C0].valid = 1;
842
	pr->power.states[ACPI_STATE_C0].type = ACPI_STATE_C0;
619
843
620
	/*
844
	/*
621
	 * C1
845
	 * C1
Lines 625-642 acpi_processor_get_power_info (
625
	 * TBD: What about PROC_C1?
849
	 * TBD: What about PROC_C1?
626
	 */
850
	 */
627
	pr->power.states[ACPI_STATE_C1].valid = 1;
851
	pr->power.states[ACPI_STATE_C1].valid = 1;
852
	pr->power.states[ACPI_STATE_C1].type = 1;
853
	pr->power.count = 1;
854
855
	/* Re-evaluate _CST for this processor */
856
	result = acpi_processor_get_power_info_cst(pr);
628
857
629
	/*
858
	/*
630
	 * C2
859
	 * Reset Default Policy
631
	 * --
860
	 * --------------------
632
	 * We're (currently) only supporting C2 on UP systems.
861
	 * Now that we know which state are supported, set the default
633
	 *
862
	 * policy.  Note that this policy can be changed dynamically
634
	 * TBD: Support for C2 on MP (P_LVL2_UP).
863
	 * (e.g. encourage deeper sleeps to conserve battery life when
864
	 * not on AC).
635
	 */
865
	 */
636
	if (pr->power.states[ACPI_STATE_C2].address) {
866
	result = acpi_processor_set_power_policy(pr);
637
867
868
	/*
869
	 * (re)-install the acpi idle loop if needed.
870
	 */
871
	if (pr->power.count > 1) {
872
		pr->flags.power = 1;
873
		pm_idle_save = pm_idle;
874
		pm_idle = acpi_processor_idle;
875
	} else {
876
		pr->flags.power = 0;
877
	}
878
879
	return_VALUE(result);
880
}
881
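Condensed, the run-time re-evaluation implemented above (and in the following hunks) does this whenever the firmware signals a C-state change:

	/*
	 * acpi_processor_cst_has_changed(), in outline:
	 *
	 *	pm_idle = pm_idle_save;			restore the default idle loop
	 *	pm_idle_save = NULL;
	 *	rebuild C0/C1, pr->power.count = 1
	 *	acpi_processor_get_power_info_cst(pr);	re-read _CST
	 *	acpi_processor_set_power_policy(pr);	rebuild the promotion/demotion chain
	 *	if (pr->power.count > 1)		re-install acpi_processor_idle
	 */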
882
static int
883
acpi_processor_get_power_info_fadt (
884
	struct acpi_processor	*pr)
885
{
886
	int			i;
887
888
	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");
889
890
	if (!pr)
891
		return_VALUE(-EINVAL);
892
893
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
894
		"lvl2[0x%08x] lvl3[0x%08x]\n",
895
		pr->power.states[ACPI_STATE_C2].address,
896
		pr->power.states[ACPI_STATE_C3].address));
897
898
	/*
899
	 * cleanup all C states, since acpi_processor_get_power_info_cst
900
	 * may have 'corrupted' them.
901
	 */
902
903
	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
904
		memset(&(pr->power.states[i]), 0, sizeof(struct acpi_processor_cx));
905
906
	if (pr->pblk) {
907
		pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
638
		pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
908
		pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
639
909
910
		pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
911
		pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;
912
913
		/*
914
		 * C2
915
		 * --
916
		 * We're (currently) only supporting C2 on UP systems.
917
		 *
918
		 * TBD: Support for C2 on MP (P_LVL2_UP).
919
		 */
920
640
		/*
921
		/*
641
		 * C2 latency must be less than or equal to 100 microseconds.
922
		 * C2 latency must be less than or equal to 100 microseconds.
642
		 */
923
		 */
Lines 656-675 acpi_processor_get_power_info (
656
		 */
937
		 */
657
		else {
938
		else {
658
			pr->power.states[ACPI_STATE_C2].valid = 1;
939
			pr->power.states[ACPI_STATE_C2].valid = 1;
940
			pr->power.states[ACPI_STATE_C2].type = 2;
659
			pr->power.states[ACPI_STATE_C2].latency_ticks = 
941
			pr->power.states[ACPI_STATE_C2].latency_ticks = 
660
				US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat);
942
				US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat);
943
			pr->power.count = 2;
661
		}
944
		}
662
	}
945
		/*
663
946
		 * C3
664
	/*
947
		 * --
665
	 * C3
948
		 * TBD: Investigate use of WBINVD on UP/SMP system in absence of
666
	 * --
949
		 *	bm_control.
667
	 * TBD: Investigate use of WBINVD on UP/SMP system in absence of
950
		 */
668
	 *	bm_control.
669
	 */
670
	if (pr->power.states[ACPI_STATE_C3].address) {
671
672
		pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;
673
951
674
		/*
952
		/*
675
		 * C3 latency must be less than or equal to 1000 microseconds.
953
		 * C3 latency must be less than or equal to 1000 microseconds.
Lines 713-724 acpi_processor_get_power_info (
713
		 */
991
		 */
714
		else {
992
		else {
715
			pr->power.states[ACPI_STATE_C3].valid = 1;
993
			pr->power.states[ACPI_STATE_C3].valid = 1;
994
			pr->power.states[ACPI_STATE_C3].type = 3;
716
			pr->power.states[ACPI_STATE_C3].latency_ticks = 
995
			pr->power.states[ACPI_STATE_C3].latency_ticks = 
717
				US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat);
996
				US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat);
718
			pr->flags.bm_check = 1;
997
			pr->flags.bm_check = 1;
998
			pr->power.count = 3;
719
		}
999
		}
720
	}
1000
	}
721
1001
1002
	return_VALUE(0);
1003
}
1004
1005
int
1006
acpi_processor_get_power_info (
1007
	struct acpi_processor	*pr)
1008
{
1009
	int			result = 0;
1010
1011
	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
1012
1013
	if (!pr)
1014
		return_VALUE(-EINVAL);
1015
1016
	if (errata.smp) {
1017
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1018
			"Power states not supported in SMP mode\n"));
1019
		return_VALUE(-EINVAL);
1020
	}
1021
1022
	/*
1023
	 * C0
1024
	 * --
1025
	 * This state exists only as filler in our array.
1026
	 */
1027
	pr->power.states[ACPI_STATE_C0].valid = 1;
1028
	pr->power.states[ACPI_STATE_C0].type = ACPI_STATE_C0;
1029
1030
	/*
1031
	 * C1
1032
	 * --
1033
	 * ACPI requires C1 support for all processors.
1034
	 *
1035
	 * TBD: What about PROC_C1?
1036
	 */
1037
	pr->power.states[ACPI_STATE_C1].valid = 1;
1038
	pr->power.states[ACPI_STATE_C1].type = 1;
1039
	pr->power.count = 1;
1040
1041
	/*
1042
	 * Look for ACPI 2.0 (_CST) objects first, then fall back to
1043
	 * the FADT if that fails. */
1044
	result = acpi_processor_get_power_info_cst(pr);
1045
	if (ACPI_FAILURE(result))
1046
		result = acpi_processor_get_power_info_fadt(pr);
722
	/*
1047
	/*
723
	 * Set Default Policy
1048
	 * Set Default Policy
724
	 * ------------------
1049
	 * ------------------
Lines 735-746 acpi_processor_get_power_info (
735
	 * If this processor supports C2 or C3 we denote it as being 'power
1060
	 * If this processor supports C2 or C3 we denote it as being 'power
736
	 * manageable'.  Note that there's really no policy involved for
1061
	 * manageable'.  Note that there's really no policy involved for
737
	 * when only C1 is supported.
1062
	 * when only C1 is supported.
1063
	 *
1064
	 * Note that if the has_cst bit is set, then this flag may change.
1065
	 * See acpi_processor_cst_has_changed().
738
	 */
1066
	 */
739
	if (pr->power.states[ACPI_STATE_C2].valid 
1067
	if (pr->power.count > 1)
740
		|| pr->power.states[ACPI_STATE_C3].valid)
741
		pr->flags.power = 1;
1068
		pr->flags.power = 1;
742
1069
743
	return_VALUE(0);
1070
	return_VALUE(result);
744
}
1071
}
745
1072
746
1073
Lines 1866-1873 static int acpi_processor_power_seq_show
1866
2193
1867
	seq_puts(seq, "states:\n");
2194
	seq_puts(seq, "states:\n");
1868
2195
1869
	for (i = 1; i < ACPI_C_STATE_COUNT; i++) {
2196
	for (i = 1; i <= pr->power.count; i++) {
1870
		seq_printf(seq, "   %cC%d:                  ", 
2197
		seq_printf(seq, "   %cC%d:         ", 
1871
			(i == pr->power.state?'*':' '), i);
2198
			(i == pr->power.state?'*':' '), i);
1872
2199
1873
		if (!pr->power.states[i].valid) {
2200
		if (!pr->power.states[i].valid) {
Lines 1875-1880 static int acpi_processor_power_seq_show
1875
			continue;
2202
			continue;
1876
		}
2203
		}
1877
2204
2205
		seq_printf(seq, "type[%d] ", pr->power.states[i].type);
2206
1878
		if (pr->power.states[i].promotion.state)
2207
		if (pr->power.states[i].promotion.state)
1879
			seq_printf(seq, "promotion[C%d] ",
2208
			seq_printf(seq, "promotion[C%d] ",
1880
				pr->power.states[i].promotion.state);
2209
				pr->power.states[i].promotion.state);
Lines 2209-2221 acpi_processor_get_info (
2209
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n",
2538
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n",
2210
			object.processor.pblk_length));
2539
			object.processor.pblk_length));
2211
	else {
2540
	else {
2541
		pr->pblk = object.processor.pblk_address;
2542
2212
		pr->throttling.address = object.processor.pblk_address;
2543
		pr->throttling.address = object.processor.pblk_address;
2213
		pr->throttling.duty_offset = acpi_fadt.duty_offset;
2544
		pr->throttling.duty_offset = acpi_fadt.duty_offset;
2214
		pr->throttling.duty_width = acpi_fadt.duty_width;
2545
		pr->throttling.duty_width = acpi_fadt.duty_width;
2215
		pr->power.states[ACPI_STATE_C2].address =
2216
			object.processor.pblk_address + 4;
2217
		pr->power.states[ACPI_STATE_C3].address =
2218
			object.processor.pblk_address + 5;
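The +4/+5 offsets in the removed lines come from the layout of the processor's P_BLK; with this patch the raw P_BLK address is stashed in the new pr->pblk field and the same offsets are applied later, in acpi_processor_get_power_info_fadt():

	/*
	 * P_BLK layout (6 bytes):
	 *	pblk + 0..3 : P_CNT  - throttling control register
	 *	pblk + 4    : P_LVL2 - read to enter C2
	 *	pblk + 5    : P_LVL3 - read to enter C3
	 */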
Lines 2229-2234 acpi_processor_get_info (
2538
       }
2539
2540
       /* Take ownership of power state control (since we support _CST),
2541
        * but only on UP systems. */
2542
       if (!errata.smp && pr->id == 0 && acpi_fadt.cst_cnt) {
2543
               status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
2544
               if (ACPI_FAILURE(status)) {
2545
                       ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
2546
                               "Power management ownership over BIOS failed.\n"));
2547
2548
               }
Lines 2253-2259 acpi_processor_notify (
2253
			pr->performance_platform_limit);
2591
			pr->performance_platform_limit);
2254
		break;
2592
		break;
2255
	case ACPI_PROCESSOR_NOTIFY_POWER:
2593
	case ACPI_PROCESSOR_NOTIFY_POWER:
2256
		/* TBD */
2594
		acpi_processor_cst_has_changed(pr);
2595
		/* TBD '0' is most likely not appropriate */
2257
		acpi_bus_generate_event(device, event, 0);
2596
		acpi_bus_generate_event(device, event, 0);
2258
		break;
2597
		break;
2259
	default:
2598
	default:
Lines 2298-2303 acpi_processor_add (
2298
	if (result)
2637
	if (result)
2299
		goto end;
2638
		goto end;
2300
2639
2640
	/*
2641
	 * Install the idle handler if processor power management is supported.
2642
	 * Note that the default idle handler (default_idle) will be used on 
2643
	 * platforms that only support C1.
2644
	 */
2645
	if ((pr->id == 0) && (pr->flags.power)) {
2646
		pm_idle_save = pm_idle;
2647
		pm_idle = acpi_processor_idle;
2648
	}
2649
2301
	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, 
2650
	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, 
2302
		acpi_processor_notify, pr);
2651
		acpi_processor_notify, pr);
2303
	if (ACPI_FAILURE(status)) {
2652
	if (ACPI_FAILURE(status)) {
Lines 2309-2327 acpi_processor_add (
2309
2658
2310
	processors[pr->id] = pr;
2659
	processors[pr->id] = pr;
2311
2660
2312
	/*
2313
	 * Install the idle handler if processor power management is supported.
2314
	 * Note that the default idle handler (default_idle) will be used on 
2315
	 * platforms that only support C1.
2316
	 */
2317
	if ((pr->id == 0) && (pr->flags.power)) {
2318
		pm_idle_save = pm_idle;
2319
		pm_idle = acpi_processor_idle;
2320
	}
2321
	
2322
	printk(KERN_INFO PREFIX "%s [%s] (supports",
2661
	printk(KERN_INFO PREFIX "%s [%s] (supports",
2323
		acpi_device_name(device), acpi_device_bid(device));
2662
		acpi_device_name(device), acpi_device_bid(device));
2324
	for (i=1; i<ACPI_C_STATE_COUNT; i++)
2663
	for (i = 1; i <= pr->power.count; i++)
2325
		if (pr->power.states[i].valid)
2664
		if (pr->power.states[i].valid)
2326
			printk(" C%d", i);
2665
			printk(" C%d", i);
2327
	if (pr->flags.throttling)
2666
	if (pr->flags.throttling)
Lines 2353-2362 acpi_processor_remove (
2353
2692
2354
	pr = (struct acpi_processor *) acpi_driver_data(device);
2693
	pr = (struct acpi_processor *) acpi_driver_data(device);
2355
2694
2356
	/* Unregister the idle handler when processor #0 is removed. */
2357
	if (pr->id == 0)
2358
		pm_idle = pm_idle_save;
2359
2360
	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, 
2695
	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, 
2361
		acpi_processor_notify);
2696
		acpi_processor_notify);
2362
	if (ACPI_FAILURE(status)) {
2697
	if (ACPI_FAILURE(status)) {
Lines 2364-2369 acpi_processor_remove (
2364
			"Error removing notify handler\n"));
2699
			"Error removing notify handler\n"));
2365
	}
2700
	}
2366
2701
2702
	/* Unregister the idle handler when processor #0 is removed. */
2703
	if (pr->id == 0)
2704
		pm_idle = pm_idle_save;
2705
2367
	acpi_processor_remove_fs(device);
2706
	acpi_processor_remove_fs(device);
2368
2707
2369
	processors[pr->id] = NULL;
2708
	processors[pr->id] = NULL;
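One ordering detail in acpi_processor_remove(): the notify handler is now removed before pm_idle is restored (it used to be the other way around), presumably so a late ACPI_PROCESSOR_NOTIFY_POWER event cannot run acpi_processor_cst_has_changed() and re-install acpi_processor_idle behind the teardown. The resulting order, sketched from the hunks above:

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
		acpi_processor_notify);
	/* ...error handling as above... */

	/* Only now is it safe to drop back to the default idle loop. */
	if (pr->id == 0)
		pm_idle = pm_idle_save;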
(-)linux-2.6.4-bk-acpi/include/acpi/processor.h (-1 / +16 lines)
Lines 5-11
5
5
6
#define ACPI_PROCESSOR_BUSY_METRIC	10
6
#define ACPI_PROCESSOR_BUSY_METRIC	10
7
7
8
#define ACPI_PROCESSOR_MAX_POWER	ACPI_C_STATE_COUNT
8
#define ACPI_PROCESSOR_MAX_POWER	8
9
#define ACPI_PROCESSOR_MAX_C2_LATENCY	100
9
#define ACPI_PROCESSOR_MAX_C2_LATENCY	100
10
#define ACPI_PROCESSOR_MAX_C3_LATENCY	1000
10
#define ACPI_PROCESSOR_MAX_C3_LATENCY	1000
11
11
Lines 15-20
15
15
16
/* Power Management */
16
/* Power Management */
17
17
18
struct acpi_power_register {
19
	u8			descriptor;
20
	u16			length;
21
	u8			space_id;
22
	u8			bit_width;
23
	u8			bit_offset;
24
	u8			reserved;
25
	u64			address;
26
} __attribute__ ((packed));
27
28
18
struct acpi_processor_cx_policy {
29
struct acpi_processor_cx_policy {
19
	u32			count;
30
	u32			count;
20
	int			state;
31
	int			state;
Lines 28-33 struct acpi_processor_cx_policy {
28
39
29
struct acpi_processor_cx {
40
struct acpi_processor_cx {
30
	u8			valid;
41
	u8			valid;
42
	u32			type;
31
	u32			address;
43
	u32			address;
32
	u32			latency;
44
	u32			latency;
33
	u32			latency_ticks;
45
	u32			latency_ticks;
Lines 40-45 struct acpi_processor_cx {
40
struct acpi_processor_power {
52
struct acpi_processor_power {
41
	int			state;
53
	int			state;
42
	int			default_state;
54
	int			default_state;
55
	int			count;
56
	int			has_cst;
43
	u32			bm_activity;
57
	u32			bm_activity;
44
	struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
58
	struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
45
};
59
};
Lines 125-130 struct acpi_processor {
125
	acpi_handle		handle;
139
	acpi_handle		handle;
126
	u32			acpi_id;
140
	u32			acpi_id;
127
	u32			id;
141
	u32			id;
142
	u32			pblk;
128
	int			performance_platform_limit;
143
	int			performance_platform_limit;
129
	struct acpi_processor_flags flags;
144
	struct acpi_processor_flags flags;
130
	struct acpi_processor_power power;
145
	struct acpi_processor_power power;
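The new struct acpi_power_register is declared packed because it is overlaid directly on the Register buffer returned inside _CST (a generic address descriptor: one-byte descriptor tag, two-byte length, then the address space, bit width, bit offset and 64-bit address). That is how the parser in processor.c consumes it:

	struct acpi_power_register *reg;

	reg = (struct acpi_power_register *) obj->buffer.pointer;
	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		cx.address = reg->address;	/* I/O port read to enter the state */
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		cx.address = 0;			/* state entered via a native mechanism */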
