{"schema":"libjg2-1",
"vpath":"/git/",
"avatar":"/git/avatar/",
"alang":"",
"gen_ut":1752655609,
"reponame":"openssl",
"desc":"OpenSSL",
"owner": { "name": "Andy Green", "email": "andy@warmcat.com", "md5": "c50933ca2aa61e0fe2c43d46bb6b59cb" },"url":"https://warmcat.com/repo/openssl",
"f":3,
"items": [
{"schema":"libjg2-1",
"cid":"17f817385816c4f8cd74dc1ed5ef0732",
"commit": {"type":"commit",
"time": 1529582463,
"time_ofs": 120,
"oid_tree": { "oid": "0b63576d0a12b2d793109b242dcff5dec141efca", "alias": []},
"oid":{ "oid": "9be083ad36cd148bddaa11cee835e27b6f5e67df", "alias": []},
"msg": "Remove some inline assembler and non-standard constructs.",
"sig_commit": { "git_time": { "time": 1529582463, "offset": 120 }, "name": "Andy Polyakov", "email": "appro@openssl.org", "md5": "50bd64fa2a792cbbf679fa16213a3b2a" },
"sig_author": { "git_time": { "time": 1529319053, "offset": 120 }, "name": "Andy Polyakov", "email": "appro@openssl.org", "md5": "50bd64fa2a792cbbf679fa16213a3b2a" }},
"body": "Remove some inline assembler and non-standard constructs.\n\nThe goal is to minimize maintenance burden by eliminating somewhat\nobscure platform-specific tweaks that are not viewed as critical for\ncontemporary applications. This affects Camellia and digest\nimplementations that rely on md32_common.h, MD4, MD5, SHA1, SHA256.\nSHA256 is the only one that can be viewed as critical, but given\nthe assembly coverage, the omission is considered appropriate.\n\nReviewed-by: Rich Salz \u003crsalz@openssl.org\u003e\nReviewed-by: Richard Levitte \u003clevitte@openssl.org\u003e\n(Merged from https://github.com/openssl/openssl/pull/6508)\n"
,
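What survives the change is the classic portable rotate idiom. The following
standalone sketch is illustration only (not part of the commit): it uses the
same ROTATE definition that md32_common.h now uses unconditionally. Modern gcc
and clang typically recognize this pattern and emit a single hardware rotate
instruction (e.g. roll on x86), which is what made the inline-asm variants
expendable for contemporary compilers.

#include <stdio.h>
#include <stdint.h>

/* Same shape as the surviving md32_common.h macro; n must be in 1..31. */
#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))

int main(void)
{
    uint32_t x = 0x80000001U;

    /* Left-rotate by 1: the top bit wraps around into bit 0 -> 0x00000003. */
    printf("ROTATE(0x%08X, 1) = 0x%08X\n", (unsigned)x, (unsigned)ROTATE(x, 1));
    return 0;
}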
"diff": "diff --git a/crypto/camellia/camellia.c b/crypto/camellia/camellia.c\nindex 6641a62..45f2325 100644\n--- a/crypto/camellia/camellia.c\n+++ b/crypto/camellia/camellia.c\n@@ -44,51 +44,11 @@\n #include \u003cstring.h\u003e\n #include \u003cstdlib.h\u003e\n \n-/* 32-bit rotations */\n-#if !defined(PEDANTIC) \u0026\u0026 !defined(OPENSSL_NO_ASM) \u0026\u0026 !defined(OPENSSL_NO_INLINE_ASM)\n-# if defined(_MSC_VER) \u0026\u0026 (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))\n-# define RightRotate(x, s) _lrotr(x, s)\n-# define LeftRotate(x, s) _lrotl(x, s)\n-# if _MSC_VER \u003e\u003d 1400\n-# define SWAP(x) _byteswap_ulong(x)\n-# else\n-# define SWAP(x) (_lrotl(x, 8) \u0026 0x00ff00ff | _lrotr(x, 8) \u0026 0xff00ff00)\n-# endif\n-# define GETU32(p) SWAP(*((u32 *)(p)))\n-# define PUTU32(p,v) (*((u32 *)(p)) \u003d SWAP((v)))\n-# elif defined(__GNUC__) \u0026\u0026 __GNUC__\u003e\u003d2\n-# if defined(__i386) || defined(__x86_64)\n-# define RightRotate(x,s) ({u32 ret; asm (\u0022rorl %1,%0\u0022:\u0022\u003dr\u0022(ret):\u0022I\u0022(s),\u00220\u0022(x):\u0022cc\u0022); ret; })\n-# define LeftRotate(x,s) ({u32 ret; asm (\u0022roll %1,%0\u0022:\u0022\u003dr\u0022(ret):\u0022I\u0022(s),\u00220\u0022(x):\u0022cc\u0022); ret; })\n-# if defined(B_ENDIAN) /* stratus.com does it */\n-# define GETU32(p) (*(u32 *)(p))\n-# define PUTU32(p,v) (*(u32 *)(p)\u003d(v))\n-# else\n-# define GETU32(p) ({u32 r\u003d*(const u32 *)(p); asm(\u0022bswapl %0\u0022:\u0022\u003dr\u0022(r):\u00220\u0022(r)); r; })\n-# define PUTU32(p,v) ({u32 r\u003d(v); asm(\u0022bswapl %0\u0022:\u0022\u003dr\u0022(r):\u00220\u0022(r)); *(u32 *)(p)\u003dr; })\n-# endif\n-# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \u005c\n- defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)\n-# define LeftRotate(x,s) ({u32 ret; asm (\u0022rlwinm %0,%1,%2,0,31\u0022:\u0022\u003dr\u0022(ret):\u0022r\u0022(x),\u0022I\u0022(s)); ret; })\n-# define RightRotate(x,s) LeftRotate(x,(32-s))\n-# elif defined(__s390x__)\n-# define LeftRotate(x,s) ({u32 ret; asm (\u0022rll %0,%1,%2\u0022:\u0022\u003dr\u0022(ret):\u0022r\u0022(x),\u0022I\u0022(s)); ret; })\n-# define RightRotate(x,s) LeftRotate(x,(32-s))\n-# define GETU32(p) (*(u32 *)(p))\n-# define PUTU32(p,v) (*(u32 *)(p)\u003d(v))\n-# endif\n-# endif\n-#endif\n-\n-#if !defined(RightRotate) \u0026\u0026 !defined(LeftRotate)\n-# define RightRotate(x, s) ( ((x) \u003e\u003e (s)) + ((x) \u003c\u003c (32 - s)) )\n-# define LeftRotate(x, s) ( ((x) \u003c\u003c (s)) + ((x) \u003e\u003e (32 - s)) )\n-#endif\n-\n-#if !defined(GETU32) \u0026\u0026 !defined(PUTU32)\n-# define GETU32(p) (((u32)(p)[0] \u003c\u003c 24) ^ ((u32)(p)[1] \u003c\u003c 16) ^ ((u32)(p)[2] \u003c\u003c 8) ^ ((u32)(p)[3]))\n-# define PUTU32(p,v) ((p)[0] \u003d (u8)((v) \u003e\u003e 24), (p)[1] \u003d (u8)((v) \u003e\u003e 16), (p)[2] \u003d (u8)((v) \u003e\u003e 8), (p)[3] \u003d (u8)(v))\n-#endif\n+#define RightRotate(x, s) ( ((x) \u003e\u003e (s)) + ((x) \u003c\u003c (32 - s)) )\n+#define LeftRotate(x, s) ( ((x) \u003c\u003c (s)) + ((x) \u003e\u003e (32 - s)) )\n+\n+#define GETU32(p) (((u32)(p)[0] \u003c\u003c 24) ^ ((u32)(p)[1] \u003c\u003c 16) ^ ((u32)(p)[2] \u003c\u003c 8) ^ ((u32)(p)[3]))\n+#define PUTU32(p,v) ((p)[0] \u003d (u8)((v) \u003e\u003e 24), (p)[1] \u003d (u8)((v) \u003e\u003e 16), (p)[2] \u003d (u8)((v) \u003e\u003e 8), (p)[3] \u003d (u8)(v))\n \n /* S-box data */\n #define SBOX1_1110 Camellia_SBOX[0]\ndiff --git a/crypto/include/internal/md32_common.h 
b/crypto/include/internal/md32_common.h\nindex e498cf3..2e9f893 100644\n--- a/crypto/include/internal/md32_common.h\n+++ b/crypto/include/internal/md32_common.h\n@@ -93,149 +93,31 @@\n # error \u0022HASH_BLOCK_DATA_ORDER must be defined!\u0022\n #endif\n \n-/*\n- * Engage compiler specific rotate intrinsic function if available.\n- */\n-#undef ROTATE\n-#ifndef PEDANTIC\n-# if defined(_MSC_VER)\n-# define ROTATE(a,n) _lrotl(a,n)\n-# elif defined(__ICC)\n-# define ROTATE(a,n) _rotl(a,n)\n-# elif defined(__GNUC__) \u0026\u0026 __GNUC__\u003e\u003d2 \u0026\u0026 !defined(OPENSSL_NO_ASM) \u0026\u0026 !defined(OPENSSL_NO_INLINE_ASM)\n- /*\n- * Some GNU C inline assembler templates. Note that these are\n- * rotates by *constant* number of bits! But that's exactly\n- * what we need here...\n- */\n-# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)\n-# define ROTATE(a,n) ({ register unsigned int ret; \u005c\n- asm ( \u005c\n- \u0022roll %1,%0\u0022 \u005c\n- : \u0022\u003dr\u0022(ret) \u005c\n- : \u0022I\u0022(n), \u00220\u0022((unsigned int)(a)) \u005c\n- : \u0022cc\u0022); \u005c\n- ret; \u005c\n- })\n-# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \u005c\n- defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)\n-# define ROTATE(a,n) ({ register unsigned int ret; \u005c\n- asm ( \u005c\n- \u0022rlwinm %0,%1,%2,0,31\u0022 \u005c\n- : \u0022\u003dr\u0022(ret) \u005c\n- : \u0022r\u0022(a), \u0022I\u0022(n)); \u005c\n- ret; \u005c\n- })\n-# elif defined(__s390x__)\n-# define ROTATE(a,n) ({ register unsigned int ret; \u005c\n- asm (\u0022rll %0,%1,%2\u0022 \u005c\n- : \u0022\u003dr\u0022(ret) \u005c\n- : \u0022r\u0022(a), \u0022I\u0022(n)); \u005c\n- ret; \u005c\n- })\n-# endif\n-# endif\n-#endif /* PEDANTIC */\n-\n-#ifndef ROTATE\n-# define ROTATE(a,n) (((a)\u003c\u003c(n))|(((a)\u00260xffffffff)\u003e\u003e(32-(n))))\n-#endif\n+#define ROTATE(a,n) (((a)\u003c\u003c(n))|(((a)\u00260xffffffff)\u003e\u003e(32-(n))))\n \n #if defined(DATA_ORDER_IS_BIG_ENDIAN)\n \n-# ifndef PEDANTIC\n-# if defined(__GNUC__) \u0026\u0026 __GNUC__\u003e\u003d2 \u0026\u0026 !defined(OPENSSL_NO_ASM) \u0026\u0026 !defined(OPENSSL_NO_INLINE_ASM)\n-# if ((defined(__i386) || defined(__i386__)) \u0026\u0026 !defined(I386_ONLY)) || \u005c\n- (defined(__x86_64) || defined(__x86_64__))\n-# if !defined(B_ENDIAN)\n- /*\n- * This gives ~30-40% performance improvement in SHA-256 compiled\n- * with gcc [on P4]. Well, first macro to be frank. 
We can pull\n- * this trick on x86* platforms only, because these CPUs can fetch\n- * unaligned data without raising an exception.\n- */\n-# define HOST_c2l(c,l) ({ unsigned int r\u003d*((const unsigned int *)(c)); \u005c\n- asm (\u0022bswapl %0\u0022:\u0022\u003dr\u0022(r):\u00220\u0022(r)); \u005c\n- (c)+\u003d4; (l)\u003dr; })\n-# define HOST_l2c(l,c) ({ unsigned int r\u003d(l); \u005c\n- asm (\u0022bswapl %0\u0022:\u0022\u003dr\u0022(r):\u00220\u0022(r)); \u005c\n- *((unsigned int *)(c))\u003dr; (c)+\u003d4; r; })\n-# endif\n-# elif defined(__aarch64__)\n-# if defined(__BYTE_ORDER__)\n-# if defined(__ORDER_LITTLE_ENDIAN__) \u0026\u0026 __BYTE_ORDER__\u003d\u003d__ORDER_LITTLE_ENDIAN__\n-# define HOST_c2l(c,l) ({ unsigned int r; \u005c\n- asm (\u0022rev %w0,%w1\u0022 \u005c\n- :\u0022\u003dr\u0022(r) \u005c\n- :\u0022r\u0022(*((const unsigned int *)(c))));\u005c\n- (c)+\u003d4; (l)\u003dr; })\n-# define HOST_l2c(l,c) ({ unsigned int r; \u005c\n- asm (\u0022rev %w0,%w1\u0022 \u005c\n- :\u0022\u003dr\u0022(r) \u005c\n- :\u0022r\u0022((unsigned int)(l)));\u005c\n- *((unsigned int *)(c))\u003dr; (c)+\u003d4; r; })\n-# elif defined(__ORDER_BIG_ENDIAN__) \u0026\u0026 __BYTE_ORDER__\u003d\u003d__ORDER_BIG_ENDIAN__\n-# define HOST_c2l(c,l) ((l)\u003d*((const unsigned int *)(c)), (c)+\u003d4, (l))\n-# define HOST_l2c(l,c) (*((unsigned int *)(c))\u003d(l), (c)+\u003d4, (l))\n-# endif\n-# endif\n-# endif\n-# endif\n-# if defined(__s390__) || defined(__s390x__)\n-# define HOST_c2l(c,l) ((l)\u003d*((const unsigned int *)(c)), (c)+\u003d4, (l))\n-# define HOST_l2c(l,c) (*((unsigned int *)(c))\u003d(l), (c)+\u003d4, (l))\n-# endif\n-# endif\n-\n-# ifndef HOST_c2l\n-# define HOST_c2l(c,l) (l \u003d(((unsigned long)(*((c)++)))\u003c\u003c24), \u005c\n+# define HOST_c2l(c,l) (l \u003d(((unsigned long)(*((c)++)))\u003c\u003c24), \u005c\n l|\u003d(((unsigned long)(*((c)++)))\u003c\u003c16), \u005c\n l|\u003d(((unsigned long)(*((c)++)))\u003c\u003c 8), \u005c\n l|\u003d(((unsigned long)(*((c)++))) ) )\n-# endif\n-# ifndef HOST_l2c\n-# define HOST_l2c(l,c) (*((c)++)\u003d(unsigned char)(((l)\u003e\u003e24)\u00260xff), \u005c\n+# define HOST_l2c(l,c) (*((c)++)\u003d(unsigned char)(((l)\u003e\u003e24)\u00260xff), \u005c\n *((c)++)\u003d(unsigned char)(((l)\u003e\u003e16)\u00260xff), \u005c\n *((c)++)\u003d(unsigned char)(((l)\u003e\u003e 8)\u00260xff), \u005c\n *((c)++)\u003d(unsigned char)(((l) )\u00260xff), \u005c\n l)\n-# endif\n \n #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)\n \n-# ifndef PEDANTIC\n-# if defined(__GNUC__) \u0026\u0026 __GNUC__\u003e\u003d2 \u0026\u0026 !defined(OPENSSL_NO_ASM) \u0026\u0026 !defined(OPENSSL_NO_INLINE_ASM)\n-# if defined(__s390x__)\n-# define HOST_c2l(c,l) ({ asm (\u0022lrv %0,%1\u0022 \u005c\n- :\u0022\u003dd\u0022(l) :\u0022m\u0022(*(const unsigned int *)(c)));\u005c\n- (c)+\u003d4; (l); })\n-# define HOST_l2c(l,c) ({ asm (\u0022strv %1,%0\u0022 \u005c\n- :\u0022\u003dm\u0022(*(unsigned int *)(c)) :\u0022d\u0022(l));\u005c\n- (c)+\u003d4; (l); })\n-# endif\n-# endif\n-# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)\n-# ifndef B_ENDIAN\n- /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. 
*/\n-# define HOST_c2l(c,l) ((l)\u003d*((const unsigned int *)(c)), (c)+\u003d4, l)\n-# define HOST_l2c(l,c) (*((unsigned int *)(c))\u003d(l), (c)+\u003d4, l)\n-# endif\n-# endif\n-# endif\n-\n-# ifndef HOST_c2l\n-# define HOST_c2l(c,l) (l \u003d(((unsigned long)(*((c)++))) ), \u005c\n+# define HOST_c2l(c,l) (l \u003d(((unsigned long)(*((c)++))) ), \u005c\n l|\u003d(((unsigned long)(*((c)++)))\u003c\u003c 8), \u005c\n l|\u003d(((unsigned long)(*((c)++)))\u003c\u003c16), \u005c\n l|\u003d(((unsigned long)(*((c)++)))\u003c\u003c24) )\n-# endif\n-# ifndef HOST_l2c\n-# define HOST_l2c(l,c) (*((c)++)\u003d(unsigned char)(((l) )\u00260xff), \u005c\n+# define HOST_l2c(l,c) (*((c)++)\u003d(unsigned char)(((l) )\u00260xff), \u005c\n *((c)++)\u003d(unsigned char)(((l)\u003e\u003e 8)\u00260xff), \u005c\n *((c)++)\u003d(unsigned char)(((l)\u003e\u003e16)\u00260xff), \u005c\n *((c)++)\u003d(unsigned char)(((l)\u003e\u003e24)\u00260xff), \u005c\n l)\n-# endif\n \n #endif\n \n","s":{"c":1752655609,"u": 35732}}
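The byte-order side of the change follows the same logic: the bswapl/rev/lrv
assembler paths give way to byte-at-a-time macros that are endianness- and
alignment-safe on any host. A small self-contained sketch (test values are
assumptions, not from the commit) of the surviving GETU32/PUTU32 pair from
camellia.c; HOST_c2l()/HOST_l2c() in md32_common.h apply the same idea while
also advancing the buffer pointer. Optimizing compilers commonly fuse the four
byte accesses into a single load or store plus a byte swap where the target
permits it, so little is lost relative to the removed assembler.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint8_t u8;

/* Portable big-endian load/store, one byte at a time. */
#define GETU32(p) (((u32)(p)[0] << 24) ^ ((u32)(p)[1] << 16) ^ \
                   ((u32)(p)[2] << 8) ^ ((u32)(p)[3]))
#define PUTU32(p,v) ((p)[0] = (u8)((v) >> 24), (p)[1] = (u8)((v) >> 16), \
                     (p)[2] = (u8)((v) >> 8), (p)[3] = (u8)(v))

int main(void)
{
    const u8 in[4] = { 0xde, 0xad, 0xbe, 0xef };
    u8 out[4];
    u32 v = GETU32(in);   /* big-endian load: 0xdeadbeef on any host */

    PUTU32(out, v);       /* big-endian store: reproduces the input bytes */
    printf("v = 0x%08x, round-trip %s\n", (unsigned)v,
           memcmp(in, out, 4) == 0 ? "ok" : "FAILED");
    return 0;
}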
],"g": 37892,"chitpc": 0,"ehitpc": 0,"indexed":0
,
"ab": 0, "si": 0, "db":0, "di":0, "sat":0, "lfc": "0000"}