Differences between an earlier version of the patch and this patch (bug 219087)


include/sound/memalloc.h (-6 / +1 lines)
@@ -42,17 +42,12 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_NONCONTIG	8	/* non-coherent SG buffer */
 #define SNDRV_DMA_TYPE_NONCOHERENT	9	/* non-coherent buffer */
 #ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG		SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_SG		3	/* S/G pages */
 #define SNDRV_DMA_TYPE_DEV_WC_SG	6	/* SG write-combined */
 #else
 #define SNDRV_DMA_TYPE_DEV_SG	SNDRV_DMA_TYPE_DEV /* no SG-buf support */
 #define SNDRV_DMA_TYPE_DEV_WC_SG	SNDRV_DMA_TYPE_DEV_WC
 #endif
-/* fallback types, don't use those directly */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK		10
-#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK	11
-#endif
 
 /*
  * info for buffer allocation
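Reviewer note: the hunk above gives SNDRV_DMA_TYPE_DEV_SG its own ID again instead of aliasing it to SNDRV_DMA_TYPE_NONCONTIG, and drops the internal *_FALLBACK types from the public header. For context, here is a minimal sketch of driver-side usage of these types; it is not part of the patch, and "card_dev" plus the 64 KB size are made-up placeholders:

	#include <sound/memalloc.h>

	static int alloc_sg_buffer(struct device *card_dev,
				   struct snd_dma_buffer *dmab)
	{
		/* request a non-contiguous S/G buffer for this device */
		int err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card_dev,
					      64 * 1024, dmab);

		if (err < 0)
			return err;
		/* dmab->area is the CPU mapping, dmab->addr the DMA address
		 * of the first chunk; release with snd_dma_free_pages(dmab)
		 */
		return 0;
	}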
sound/core/memalloc.c (-65 / +33 lines)
@@ -26,10 +26,6 @@
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
 
-#ifdef CONFIG_SND_DMA_SGBUF
-static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
-#endif
-
 static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 {
 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
@@ -559,16 +555,8 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 	struct sg_table *sgt;
 	void *p;
 
-#ifdef CONFIG_SND_DMA_SGBUF
-	if (cpu_feature_enabled(X86_FEATURE_XENPV))
-		return snd_dma_sg_fallback_alloc(dmab, size);
-#endif
 	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
 				      DEFAULT_GFP, 0);
-#ifdef CONFIG_SND_DMA_SGBUF
-	if (!sgt && !get_dma_ops(dmab->dev.dev))
-		return snd_dma_sg_fallback_alloc(dmab, size);
-#endif
 	if (!sgt)
 		return NULL;
 
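Reviewer note: snd_dma_noncontig_alloc() becomes a plain dma_alloc_noncontiguous() wrapper again. The Xen PV check and the get_dma_ops() fallback do not disappear; they move into the new snd_dma_sg_alloc() further down, which also makes the forward declaration removed in the first hunk unnecessary.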
@@ -687,42 +675,6 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 
 /* x86-specific SG-buffer with WC pages */
 #ifdef CONFIG_SND_DMA_SGBUF
-static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
-{
-	void *p = snd_dma_noncontig_alloc(dmab, size);
-
-	if (!p)
-		return NULL;
-	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
-		return p;
-	mark_wc_pages(p, size);
-	return p;
-}
-
-static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
-{
-	unmark_wc_pages(dmab->area, dmab->bytes);
-	snd_dma_noncontig_free(dmab);
-}
-
-static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
-			      struct vm_area_struct *area)
-{
-	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-	return dma_mmap_noncontiguous(dmab->dev.dev, area,
-				      dmab->bytes, dmab->private_data);
-}
-
-static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
-	.alloc = snd_dma_sg_wc_alloc,
-	.free = snd_dma_sg_wc_free,
-	.mmap = snd_dma_sg_wc_mmap,
-	.sync = snd_dma_noncontig_sync,
-	.get_addr = snd_dma_noncontig_get_addr,
-	.get_page = snd_dma_noncontig_get_page,
-	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
-};
-
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
 	bool use_dma_alloc_coherent;
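Reviewer note: the dedicated snd_dma_sg_wc_* wrappers around the noncontig ops are dropped wholesale. In this version write-combined SG buffers are handled either by the standard SNDRV_DMA_TYPE_DEV_WC path (when snd_dma_sg_alloc() below rewrites the type) or by set_pages_array_wc() in the fallback helpers.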
@@ -760,6 +712,7 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 	kfree(sgbuf);
 }
 
+/* fallback manual S/G buffer allocations */
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct snd_dma_sg_fallback *sgbuf;
@@ -769,12 +722,6 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 	dma_addr_t addr;
 	void *p;
 
-	/* correct the type */
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
-		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
-	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
-
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 	if (!sgbuf)
 		return NULL;
@@ -819,7 +766,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 	if (!p)
 		goto error;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
 		set_pages_array_wc(sgbuf->pages, sgbuf->count);
 
 	dmab->private_data = sgbuf;
@@ -836,7 +783,7 @@ static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
 {
 	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
 		set_pages_array_wb(sgbuf->pages, sgbuf->count);
 	vunmap(dmab->area);
 	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
@@ -856,13 +803,38 @@ static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
 {
 	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
 
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
 		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
 	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
 }
 
-static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
-	.alloc = snd_dma_sg_fallback_alloc,
+static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	int type = dmab->dev.type;
+	void *p;
+
+	if (cpu_feature_enabled(X86_FEATURE_XENPV))
+		return snd_dma_sg_fallback_alloc(dmab, size);
+
+	/* try the standard DMA API allocation at first */
+	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
+		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
+	else
+		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+	p = __snd_dma_alloc_pages(dmab, size);
+	if (p)
+		return p;
+
+	dmab->dev.type = type; /* restore the type */
+	/* if IOMMU is present but failed, give up */
+	if (get_dma_ops(dmab->dev.dev))
+		return NULL;
+	/* try fallback */
+	return snd_dma_sg_fallback_alloc(dmab, size);
+}
+
+static const struct snd_malloc_ops snd_dma_sg_ops = {
+	.alloc = snd_dma_sg_alloc,
 	.free = snd_dma_sg_fallback_free,
 	.mmap = snd_dma_sg_fallback_mmap,
 	.get_addr = snd_dma_sg_fallback_get_addr,
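Reviewer note: the new snd_dma_sg_alloc() tries the standard DMA API first and only falls back to the manual page-by-page allocator when no dma_ops (i.e. no IOMMU) are present, or unconditionally on Xen PV. Observe that dmab->dev.type is restored only on the failure path: after a successful standard-path allocation it stays SNDRV_DMA_TYPE_DEV or SNDRV_DMA_TYPE_DEV_WC, so later ops lookups dispatch to the standard buffer ops, while the Xen PV and fallback cases keep the SG type and reach snd_dma_sg_ops. A hedged illustration of what a caller would observe (not part of the patch; "card_dev" is a placeholder):

	struct snd_dma_buffer dmab;

	if (!snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card_dev,
				 64 * 1024, &dmab)) {
		if (dmab.dev.type == SNDRV_DMA_TYPE_DEV)
			pr_debug("allocated via the standard DMA API\n");
		else if (dmab.dev.type == SNDRV_DMA_TYPE_DEV_SG)
			pr_debug("allocated via the manual S/G fallback\n");
		snd_dma_free_pages(&dmab);
	}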
@@ -936,15 +908,12 @@ static const struct snd_malloc_ops *snd_dma_ops[] = {
 	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
 	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
 #ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
+	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
 #endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
-	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
-#endif
 #endif /* CONFIG_HAS_DMA */
 };
 
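Net result: both SG types now share one entry point (snd_dma_sg_ops) in the dispatch table, and the *_FALLBACK entries vanish together with the types removed from include/sound/memalloc.h at the top of this interdiff.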
