path: root/common/lib/libc/arch/aarch64/string/memset.S
/* $NetBSD: memset.S,v 1.1 2014/08/10 05:47:35 matt Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

ENTRY(memset)
	cbz	x2, .Lret
	mov	x15, x0			/* working data pointer */
	cbz	x1, .Lzerofill
	/*
	 * Non zero fill, replicate to all 64 bits of x1.
	 */
	and	x1, x1, #0xff
	orr	x1, x1, x1, lsl #8
	orr	x1, x1, x1, lsl #16
	orr	x1, x1, x1, lsl #32
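	/*
	 * For example, c == 0x2a becomes:
	 *	and		0x000000000000002a
	 *	orr lsl #8	0x0000000000002a2a
	 *	orr lsl #16	0x000000002a2a2a2a
	 *	orr lsl #32	0x2a2a2a2a2a2a2a2a
	 */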
.Lfilled:
	cmp	x2, #15			/* if it's small, ignore alignment */
	b.ls	.Llast_subqword

	mov	x6, x1
	tst	x15, #15
	b.eq	.Lqword_loop

/*
 * We have at least 16 bytes to write, which means we can get qword
 * alignment without having to check the amount left.
 */
	tbz	x15, #0, .Lhword_aligned
	strb	w1, [x15], #1
.Lhword_aligned:
	tbz	x15, #1, .Lword_aligned
	strh	w1, [x15], #2
.Lword_aligned:
	tbz	x15, #2, .Ldword_aligned
	str	w1, [x15], #4
.Ldword_aligned:
	tbz	x15, #3, .Lqword_aligned
	str	x1, [x15], #8
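	/*
	 * For example, starting at 0x1003 the ladder above writes one byte
	 * (to reach 0x1004), skips the halfword, then writes a word (0x1008)
	 * and a dword (0x1010), leaving x15 qword aligned after 13 bytes.
	 */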
/*
 * Now we are qword aligned.  Figure out how much we had to write to get
 * here, then subtract that from the length.  If we get 0, we're done.
 */
.Lqword_aligned:
	sub	x5, x15, x0 
	subs	x2, x2, x5
	b.eq	.Lret

/*
 * Write 16 bytes at a time.  If we don't have 16 bytes to write, bail.
 * Keep looping while there's data to set.
 */
.Lqword_loop:
	subs	x2, x2, #16
	b.mi	.Llast_subqword
	stp	x1, x6, [x15], #16
	b.ne	.Lqword_loop
	ret
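	/*
	 * The flags here come from the subs above: b.mi catches a short
	 * tail, and b.ne falls through to ret only when x2 hit exactly 0.
	 * For example, x2 == 40 stores 16 + 16, then 8 - 16 goes negative
	 * and the sub-qword tail below writes the remaining 8 bytes.
	 */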

/*
 * We have less than a qword to write.  We hope we are aligned, but since
 * unaligned access works, we don't have to be.  Note that x2 may be
 * negative when we arrive from the qword loop; only its low four bits,
 * which equal the remaining count mod 16, are tested below.
 */
.Llast_subqword:
	tbz	x2, #3, .Llast_subdword
	str	x1, [x15], #8
.Llast_subdword:
	tbz	x2, #2, .Llast_subword
	str	w1, [x15], #4
.Llast_subword:
	tbz	x2, #1, .Llast_subhword
	strh	w1, [x15], #2
.Llast_subhword:
	tbz	x2, #0, .Lret
	strb	w1, [x15]
.Lret:	ret

/*
 * If we are filling with zeros then let's see if we can use the
 *	dc zva, <Xt>
 * instruction to speed things up.
 */
.Lzerofill:
	mrs	x9, dczid_el0
	/*
	 * Make sure the instruction isn't prohibited (DCZID_EL0 bit 4).
	 */
	tbnz	x9, #4, .Lfilled
	/*
	 * Now find out the block size.
	 */
	ubfx	x9, x9, #0, #4	/* extract BS: log2(words per block) */
	add	x9, x9, #2	/* add log2(word size) to get log2(bytes) */
	mov	x10, #1
	lsl	x10, x10, x9	/* shift to get the block size in bytes */
	cmp	x2, x10		/* are we even copying a block? */
	b.lt	.Lfilled	/*   no, do it 16 bytes at a time */
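	/*
	 * For example, a typical DCZID_EL0.BS of 4 (16 words) gives
	 * x9 = 4 + 2 = 6 and x10 = 1 << 6 = 64, i.e. 64-byte blocks.
	 */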
	/*
	 * Now we figure out how many aligned blocks we have
	 */
	sub	x11, x10, #1	/* make block size a mask */
	add	x12, x15, x11	/* round start to a block boundary */
	asr	x12, x12, x9	/* "starting" block number */
	add	x13, x15, x2	/* get ending address */
	asr	x13, x13, x9	/* "ending" block number */
	cmp	x13, x12	/* how many blocks? */
	b.eq	.Lfilled	/*   none, do it 16 bytes at a time */
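	/*
	 * For example, with x15 = 0x1007, x2 = 0x200, and 64-byte blocks:
	 * the "starting" block is (0x1007 + 63) >> 6 = 0x41 and the
	 * "ending" block is (0x1007 + 0x200) >> 6 = 0x48, so seven whole
	 * blocks can be zeroed with dc zva.
	 */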

	/*
	 * Now we have one or more blocks to deal with.  First we need
	 * to get block aligned.
	 */
	and	x7, x15, x11	/* are we already aligned on a block boundary? */
	cbz	x7, .Lblock_aligned

	sub	x7, x10, x7	/* subtract offset from block length */
	sub	x2, x2, x7	/* subtract that from length */
	asr	x7, x7, #2	/* 16 data bytes per 4-byte stp -> code offset */

	tbz	x15, #0, .Lzero_hword_aligned
	strb	wzr, [x15], #1
.Lzero_hword_aligned:
	tbz	x15, #1, .Lzero_word_aligned
	strh	wzr, [x15], #2
.Lzero_word_aligned:
	tbz	x15, #2, .Lzero_dword_aligned
	str	wzr, [x15], #4
.Lzero_dword_aligned:
	tbz	x15, #3, .Lzero_qword_aligned
	str	xzr, [x15], #8
.Lzero_qword_aligned:
	cbz	x7, .Lblock_aligned /* no qwords? just branch */
	adr	x6, .Lblock_aligned
	sub	x6, x6, x7	/* backup to write the last N qwords */
	br	x6		/* and do it */
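	/*
	 * For example, if 48 bytes remain to the block boundary once x15
	 * is 16-byte aligned, three stp instructions are needed, so we
	 * branch 48 / 4 = 12 bytes (three words of code) back from
	 * .Lblock_aligned into the run below.
	 */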
	/*
	 * This is valid for cache lines <= 256 bytes.
	 */
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16
	stp	xzr, xzr, [x15], #16

/*
 * Now we are block aligned.
 */
.Lblock_aligned:
	subs	x2, x2, x10
	b.mi	.Lblock_done
	dc	zva, x15
	add	x15, x15, x10
	b.ne	.Lblock_aligned
	ret
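	/*
	 * For example, 448 bytes at a 64-byte block size take seven
	 * iterations; the seventh subs leaves x2 == 0, so b.ne falls
	 * through to ret.  With 450 bytes the eighth subs goes negative
	 * and .Lblock_done recovers the 2 remaining bytes.
	 */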

.Lblock_done:
	and	x2, x2, x11	/* make positive again (x11 = block size - 1) */
	mov	x6, xzr		/* fill 2nd xword */
	b	.Lqword_loop	/* and finish filling */

END(memset)