/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 * Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/swab.h>
#include <linux/unaligned/generic.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using special load/store instructions.
 */

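/*
 * 16-bit accesses are open-coded byte operations: the special
 * non-aligned load/store instructions only cover word and dword
 * sizes.
 */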
static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;
	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;
	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val >> 8;
	_p[1] = val;
}

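/*
 * Read a 32-bit value from an arbitrarily aligned address using
 * ldnw (load non-aligned word). Loads have four delay slots on
 * C64x+, hence the nop 4. The "+a" constraint reuses a single
 * A-file register: the pointer goes in, the loaded value comes out.
 */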
static inline u32 get_unaligned32(const void *p)
{
	u32 val = (u32) p;
	asm (" ldnw	.d1t1	*%0,%0\n"
	     " nop	4\n"
	     : "+a"(val));
	return val;
}

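/*
 * Store a 32-bit value to an arbitrarily aligned address using
 * stnw (store non-aligned word). The "memory" clobber tells the
 * compiler the asm writes memory it cannot see through the operands.
 */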
static inline void put_unaligned32(u32 val, void *p)
{
	asm volatile (" stnw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

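/*
 * As above, but ldndw (load non-aligned doubleword) fetches a full
 * 64-bit value in a single access.
 */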
static inline u64 get_unaligned64(const void *p)
{
	u64 val;
	asm volatile (" ldndw	.d1t1	*%1,%0\n"
		      " nop	4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

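/*
 * 64-bit counterpart of put_unaligned32(), using stndw (store
 * non-aligned doubleword).
 */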
static inline void put_unaligned64(u64 val, void *p)
{
	asm volatile (" stndw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

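/*
 * get_unaligned32/64() and put_unaligned32/64() work in the CPU's
 * native byte order, so the accessors for the matching endianness map
 * straight onto them while the opposite ones go through __swab32/64().
 */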
#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif

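/*
 * Example (hypothetical caller): decoding a packed wire-format header
 * that mixes byte orders:
 *
 *	u16 type = get_unaligned_be16(hdr);
 *	u32 len  = get_unaligned_le32(hdr + 2);
 *	put_unaligned_be32(crc, hdr + 6);
 */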
#endif /* _ASM_C6X_UNALIGNED_H */