helper_unsafe.go

// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

//go:build !safe && !codec.safe && !appengine && go1.9
// +build !safe,!codec.safe,!appengine,go1.9

// minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
// - typedmemclr was introduced in go 1.8
// - mapassign_fastXXX was introduced in go 1.9
// etc

package codec

import (
	"reflect"
	_ "runtime" // needed for go linkname(s)
	"sync/atomic"
	"time"
	"unsafe"
)

// This file has unsafe variants of some helper functions.
// MARKER: See helper_unsafe.go for the usage documentation.

// There are a number of helper_*unsafe*.go files.
//
// - helper_unsafe
//   unsafe variants of dependent functions
// - helper_unsafe_compiler_gc (gc)
//   unsafe variants of dependent functions which cannot be shared with gollvm or gccgo
// - helper_not_unsafe_not_gc (gccgo/gollvm or safe)
//   safe variants of functions in helper_unsafe_compiler_gc
// - helper_not_unsafe (safe)
//   safe variants of functions in helper_unsafe
// - helper_unsafe_compiler_not_gc (gccgo, gollvm)
//   unsafe variants of functions/variables which non-standard compilers need
//
// This way, we can judiciously use build tags to include the right set of files
// for any compiler, and make it run optimally in unsafe mode.
//
// As of March 2021, we cannot differentiate whether we are running with gccgo or gollvm
// using a build constraint, as both satisfy the 'gccgo' build tag.
// Consequently, we must use the lowest common denominator to support both.

// For reflect.Value code, we decided to do the following:
// - if we know the kind, we can elide conditional checks for
//   - SetXXX (Int, Uint, String, Bool, etc)
//   - SetLen
//
// We can also optimize
// - IsNil

// MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
// - rvCopySlice: decode calls it if rvGrowSlice didn't set the new slice into the pointer to the orig slice.
//   However, helper_unsafe sets it, so there's no need to call rvCopySlice later.
// - rvSlice: same as above
// - rvGetArray4Bytes: only called within kArray for []byte, but that is now handled
//   within the fast-path directly

const safeMode = false

// helperUnsafeDirectAssignMapEntry says that we should not copy the pointer in the map
// to another value during mapRange/iteration and mapGet calls, but directly assign it.
//
// The only caller of mapRange/iteration is encode.
// Here, we just walk through the values and encode them.
//
// The only caller of mapGet is decode.
// Here, it does a Get if the underlying value is a pointer, and decodes into that.
//
// For both users, we are very careful NOT to modify or keep the pointers around.
// Consequently, it is ok to take advantage of the fact that the map is not modified
// during an iteration, and just "peek" at the internal value in the map and use it.
const helperUnsafeDirectAssignMapEntry = true

// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
const (
	unsafeFlagStickyRO = 1 << 5
	unsafeFlagEmbedRO  = 1 << 6
	unsafeFlagIndir    = 1 << 7
	unsafeFlagAddr     = 1 << 8
	unsafeFlagRO       = unsafeFlagStickyRO | unsafeFlagEmbedRO
	// unsafeFlagKindMask = (1 << 5) - 1 // 5 bits for 27 kinds (up to 31)
	// unsafeTypeKindDirectIface = 1 << 5
)

// transientSizeMax below is used in TransientAddr as the backing storage.
//
// Must be >= 16 as the maximum size is a complex128 (or string on 64-bit machines).
const transientSizeMax = 64

// should struct/array support internal strings and slices?
const transientValueHasStringSlice = false

type unsafeString struct {
	Data unsafe.Pointer
	Len  int
}

type unsafeSlice struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

type unsafeIntf struct {
	typ unsafe.Pointer
	ptr unsafe.Pointer
}

type unsafeReflectValue struct {
	unsafeIntf
	flag uintptr
}
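// A minimal sketch (not in the original): since unsafeString, unsafeSlice and
// unsafeReflectValue mirror reflect/runtime internals, their layout could be
// guarded with compile-time assertions; the constant index is only in range
// when the size difference is zero.
//
//	var _ = [1]struct{}{}[unsafe.Sizeof(unsafeString{})-unsafe.Sizeof("")]
//	var _ = [1]struct{}{}[unsafe.Sizeof(unsafeReflectValue{})-unsafe.Sizeof(reflect.Value{})]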
// keep in sync with stdlib runtime/type.go
type unsafeRuntimeType struct {
	size uintptr
	// ... many other fields here
}

// unsafeZeroAddr and unsafeZeroSlice point to a read-only block of memory
// used for setting a zero value for most types or creating a read-only
// zero value for a given type.
var (
	unsafeZeroAddr  = unsafe.Pointer(&unsafeZeroArr[0])
	unsafeZeroSlice = unsafeSlice{unsafeZeroAddr, 0, 0}
)

// We use a scratch memory and an unsafeSlice for transient values:
//
// unsafeSlice is used for standalone strings and slices (outside an array or struct).
// scratch memory is used for other kinds, based on the contract below:
//   - numbers and bools are always transient
//   - structs and arrays are transient iff they have no pointers, i.e.
//     no string, slice, chan, func, interface, map, etc; only numbers and bools
//   - slices and strings are transient (using the unsafeSlice)
type unsafePerTypeElem struct {
	arr   [transientSizeMax]byte // for bool, number, struct, array kinds
	slice unsafeSlice            // for string and slice kinds
}

func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
	if k == reflect.String || k == reflect.Slice {
		x.slice = unsafeSlice{} // memclr
		return unsafe.Pointer(&x.slice)
	}
	x.arr = [transientSizeMax]byte{} // memclr
	return unsafe.Pointer(&x.arr)
}

type perType struct {
	elems [2]unsafePerTypeElem
}

type decPerType struct {
	perType
}

type encPerType struct{}

// TransientAddrK is used for getting a *transient* value to be decoded into,
// which will right away be used for something else.
//
// See notes in helper.go about "Transient values during decoding"
func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[0].addrFor(k))
}

func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[1].addrFor(k))
}
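// A minimal sketch (not in the original) of how a decoder might use a transient
// value: decode into the scratch-backed reflect.Value and consume it right away,
// before the next TransientAddrK call reuses the same storage.
//
//	var pt perType
//	rv := pt.TransientAddrK(reflect.TypeOf(0), reflect.Int)
//	rvSetInt(rv, 42)  // stand-in for a decode step
//	n := rvGetInt(rv) // consume immediately; n == 42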
func (encPerType) AddressableRO(v reflect.Value) reflect.Value {
	return rvAddressableReadonly(v)
}

// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func stringView(v []byte) string {
	return *(*string)(unsafe.Pointer(&v))
}

// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func bytesView(v string) (b []byte) {
	sx := (*unsafeString)(unsafe.Pointer(&v))
	bx := (*unsafeSlice)(unsafe.Pointer(&b))
	bx.Data, bx.Len, bx.Cap = sx.Data, sx.Len, sx.Len
	return
}
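// A minimal sketch (not in the original): both views alias their argument's
// memory, so the argument must stay alive and unmodified while the view is used.
//
//	b := []byte("abc")
//	s := stringView(b) // no copy; s aliases b's backing array
//	b[0] = 'x'         // s now reads "xbc": mutating b while s is live breaks string immutability
//	b2 := bytesView(s) // b2 aliases s; b2 MUST NOT be written to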
func byteSliceSameData(v1 []byte, v2 []byte) bool {
	return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
}

// MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
// These functions expect that the bounds are valid, and have been checked before this is called.
// copy(...) does a number of checks which are unnecessary in this situation when in bounds.

func okBytes3(b []byte) (v [4]byte) {
	*(*[3]byte)(unsafe.Pointer(&v[1])) = *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
	return
}

func okBytes4(b []byte) [4]byte {
	return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}

func okBytes8(b []byte) [8]byte {
	return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
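// A minimal sketch (not in the original): callers must bounds-check first; the
// returned fixed-size array can then be decoded, e.g. with encoding/binary.
//
//	if len(b) >= 8 {
//	    v := okBytes8(b)                   // copies b[0:8] without copy()'s checks
//	    u := binary.BigEndian.Uint64(v[:]) // interpret as a big-endian uint64
//	    _ = u
//	}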
// isNil says whether the value v is nil.
// This applies to references like map/ptr/unsafepointer/chan/func,
// and non-reference values like interface/slice.
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
	var ui = (*unsafeIntf)(unsafe.Pointer(&v))
	isnil = ui.ptr == nil
	if !isnil {
		rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
	}
	return
}

func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
	rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
	tk := rv.Kind()
	isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
	return
}

// rvRefPtr returns the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
// True references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir.
//
// Assumes that v is a reference (map/chan/func/ptr/unsafe.Pointer).
func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
	if v.flag&unsafeFlagIndir != 0 {
		return *(*unsafe.Pointer)(v.ptr)
	}
	return v.ptr
}

func eq4i(i0, i1 interface{}) bool {
	v0 := (*unsafeIntf)(unsafe.Pointer(&i0))
	v1 := (*unsafeIntf)(unsafe.Pointer(&i1))
	return v0.typ == v1.typ && v0.ptr == v1.ptr
}

func rv4iptr(i interface{}) (v reflect.Value) {
	// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindPtr)
	return
}

func rv4istr(i interface{}) (v reflect.Value) {
	// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindString) | unsafeFlagIndir
	return
}

func rv2i(rv reflect.Value) (i interface{}) {
	// We tap into implementation details from
	// the source go stdlib reflect/value.go, and trim the implementation.
	//
	// e.g.
	// - a map/ptr is a reference, thus flagIndir is not set on it
	// - an int/slice is not a reference, thus flagIndir is set on it
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
		urv.ptr = *(*unsafe.Pointer)(urv.ptr)
	}
	return *(*interface{})(unsafe.Pointer(&urv.unsafeIntf))
}
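// A minimal sketch (not in the original): rv2i acts as the inverse of
// reflect.ValueOf for the supported kinds, without the usual
// interface-conversion allocation.
//
//	m := map[string]int{"a": 1}
//	rv := reflect.ValueOf(m)
//	i := rv2i(rv) // i.(map[string]int) shares the same map as m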
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.flag = (urv.flag & unsafeFlagRO) | uintptr(reflect.Ptr)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&ptrType))).ptr
	return rv
}

func rvIsNil(rv reflect.Value) bool {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if urv.flag&unsafeFlagIndir != 0 {
		return *(*unsafe.Pointer)(urv.ptr) == nil
	}
	return urv.ptr == nil
}

func rvSetSliceLen(rv reflect.Value, length int) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	(*unsafeString)(urv.ptr).Len = length
}

func rvZeroAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = unsafeNew(urv.typ)
	return
}

func rvZeroAddrTransientAnyK(t reflect.Type, k reflect.Kind, addr unsafe.Pointer) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = addr
	return
}

func rvZeroK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	if refBitset.isset(byte(k)) {
		urv.flag = uintptr(k)
	} else if rtsize2(urv.typ) <= uintptr(len(unsafeZeroArr)) {
		urv.flag = uintptr(k) | unsafeFlagIndir
		urv.ptr = unsafeZeroAddr
	} else { // meaning struct or array
		urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}

// rvConvert will convert a value to a different type directly,
// ensuring that they still point to the same underlying value.
func rvConvert(v reflect.Value, t reflect.Type) reflect.Value {
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	return v
}

// rvAddressableReadonly returns an addressable reflect.Value.
//
// Use it within encode calls, when you just want to "read" the underlying ptr
// without modifying the value.
//
// Note that it cannot be used for r/w use, as those non-addressable values
// may have been stored in read-only memory, and trying to write the pointer
// may cause a segfault.
func rvAddressableReadonly(v reflect.Value) reflect.Value {
	// hack to make an addressable value out of a non-addressable one.
	// Assume folks calling it are passing a value that can be addressable, but isn't.
	// This assumes that the flagIndir is already set on it,
	// so we just set the flagAddr bit on the flag (and do not set the flagIndir).
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uv.flag | unsafeFlagAddr // | unsafeFlagIndir
	return v
}

func rtsize2(rt unsafe.Pointer) uintptr {
	return ((*unsafeRuntimeType)(rt)).size
}

func rt2id(rt reflect.Type) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).ptr)
}

func i2rtid(i interface{}) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
}

// --------------------------

func unsafeCmpZero(ptr unsafe.Pointer, size int) bool {
	// verified that size is always within the right range, so no chance of an out-of-bounds read
	var s1 = unsafeString{ptr, size}
	var s2 = unsafeString{unsafeZeroAddr, size}
	if size > len(unsafeZeroArr) {
		arr := make([]byte, size)
		s2.Data = unsafe.Pointer(&arr[0])
	}
	return *(*string)(unsafe.Pointer(&s1)) == *(*string)(unsafe.Pointer(&s2)) // memcmp
}
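// A minimal sketch (not in the original): unsafeCmpZero compares raw bytes
// against zero, so any all-zero value reports true.
//
//	var x [4]int
//	unsafeCmpZero(unsafe.Pointer(&x), int(unsafe.Sizeof(x))) // true
//	x[2] = 9
//	unsafeCmpZero(unsafe.Pointer(&x), int(unsafe.Sizeof(x))) // false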
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if urv.flag == 0 {
		return true
	}
	if recursive {
		return isEmptyValueFallbackRecur(urv, v, tinfos)
	}
	return unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
}
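// A minimal sketch (not in the original): non-recursive emptiness is a plain
// byte comparison of the whole value against zero.
//
//	type T struct {
//	    A int
//	    B string
//	}
//	isEmptyValue(reflect.ValueOf(T{}), nil, false)     // true: every byte is zero
//	isEmptyValue(reflect.ValueOf(T{A: 1}), nil, false) // false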
func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos *TypeInfos) bool {
	const recursive = true
	switch v.Kind() {
	case reflect.Invalid:
		return true
	case reflect.String:
		return (*unsafeString)(urv.ptr).Len == 0
	case reflect.Slice:
		return (*unsafeSlice)(urv.ptr).Len == 0
	case reflect.Bool:
		return !*(*bool)(urv.ptr)
	case reflect.Int:
		return *(*int)(urv.ptr) == 0
	case reflect.Int8:
		return *(*int8)(urv.ptr) == 0
	case reflect.Int16:
		return *(*int16)(urv.ptr) == 0
	case reflect.Int32:
		return *(*int32)(urv.ptr) == 0
	case reflect.Int64:
		return *(*int64)(urv.ptr) == 0
	case reflect.Uint:
		return *(*uint)(urv.ptr) == 0
	case reflect.Uint8:
		return *(*uint8)(urv.ptr) == 0
	case reflect.Uint16:
		return *(*uint16)(urv.ptr) == 0
	case reflect.Uint32:
		return *(*uint32)(urv.ptr) == 0
	case reflect.Uint64:
		return *(*uint64)(urv.ptr) == 0
	case reflect.Uintptr:
		return *(*uintptr)(urv.ptr) == 0
	case reflect.Float32:
		return *(*float32)(urv.ptr) == 0
	case reflect.Float64:
		return *(*float64)(urv.ptr) == 0
	case reflect.Complex64:
		return unsafeCmpZero(urv.ptr, 8)
	case reflect.Complex128:
		return unsafeCmpZero(urv.ptr, 16)
	case reflect.Struct:
		// return isEmptyStruct(v, tinfos, recursive)
		if tinfos == nil {
			tinfos = defTypeInfos
		}
		ti := tinfos.find(uintptr(urv.typ))
		if ti == nil {
			ti = tinfos.load(rvType(v))
		}
		return unsafeCmpZero(urv.ptr, int(ti.size))
	case reflect.Interface, reflect.Ptr:
		// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
		if recursive && !isnil {
			return isEmptyValue(v.Elem(), tinfos, recursive)
		}
		return isnil
	case reflect.UnsafePointer:
		return urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
	case reflect.Chan:
		return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
	case reflect.Map:
		return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
	case reflect.Array:
		return v.Len() == 0
	}
	return false
}

// --------------------------

type structFieldInfos struct {
	c      unsafe.Pointer // source
	s      unsafe.Pointer // sorted
	length int
}

func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
	s := (*unsafeSlice)(unsafe.Pointer(&sorted))
	x.s = s.Data
	x.length = s.Len
	s = (*unsafeSlice)(unsafe.Pointer(&source))
	x.c = s.Data
}

func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
	// s := (*unsafeSlice)(unsafe.Pointer(&v))
	// s.Data = x.sorted0
	// s.Len = x.length
	// s.Cap = s.Len
	return
}

func (x *structFieldInfos) source() (v []*structFieldInfo) {
	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.c, x.length, x.length}
	return
}

// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
//
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
//
// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.

// ----------------------
type atomicTypeInfoSlice struct {
	v unsafe.Pointer // *[]rtid2ti
}

func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]rtid2ti)(x2)
	}
	return
}

func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
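// A minimal sketch (not in the original): the whole slice header is published
// behind a single pointer, so readers never observe a torn Data/Len pair.
//
//	var a atomicTypeInfoSlice
//	s := a.load()            // nil before the first store
//	s = append(s, rtid2ti{}) // zero value used purely for illustration
//	a.store(s)               // publish the grown slice in one atomic store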
// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
// This is 2 words.
// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.

// --------------------------
type atomicRtidFnSlice struct {
	v unsafe.Pointer // *[]codecRtidFn
}

func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]codecRtidFn)(x2)
	}
	return
}

func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}

// --------------------------
type atomicClsErr struct {
	v unsafe.Pointer // *clsErr
}

func (x *atomicClsErr) load() (e clsErr) {
	x2 := (*clsErr)(atomic.LoadPointer(&x.v))
	if x2 != nil {
		e = *x2
	}
	return
}

func (x *atomicClsErr) store(p clsErr) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}

// --------------------------

// to create a reflect.Value for each member field of fauxUnion,
// we first create a global fauxUnion, and create reflect.Value
// for them all.
// This way, we have the flags and type in the reflect.Value.
// Then, when a reflect.Value is needed, we just copy it,
// update the ptr to the fauxUnion's, and return it.

type unsafeDecNakedWrapper struct {
	fauxUnion
	ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
}

func (n *unsafeDecNakedWrapper) init() {
	n.ru = rv4iptr(&n.u).Elem()
	n.ri = rv4iptr(&n.i).Elem()
	n.rf = rv4iptr(&n.f).Elem()
	n.rl = rv4iptr(&n.l).Elem()
	n.rs = rv4iptr(&n.s).Elem()
	n.rt = rv4iptr(&n.t).Elem()
	n.rb = rv4iptr(&n.b).Elem()
	// n.rr[] = reflect.ValueOf(&n.)
}

var defUnsafeDecNakedWrapper unsafeDecNakedWrapper

func init() {
	defUnsafeDecNakedWrapper.init()
}

func (n *fauxUnion) ru() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ru
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u)
	return
}

func (n *fauxUnion) ri() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ri
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i)
	return
}

func (n *fauxUnion) rf() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rf
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f)
	return
}

func (n *fauxUnion) rl() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rl
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l)
	return
}

func (n *fauxUnion) rs() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rs
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s)
	return
}

func (n *fauxUnion) rt() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rt
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t)
	return
}

func (n *fauxUnion) rb() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rb
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b)
	return
}
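// A minimal sketch (not in the original): each accessor copies the prebuilt
// reflect.Value (flags and type already set) and only swaps the data pointer
// to point at this fauxUnion's field.
//
//	var n fauxUnion
//	n.s = "hello"
//	rv := n.rs() // reflect.Value of kind string whose ptr is &n.s
//	_ = rv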
// --------------------------
func rvSetBytes(rv reflect.Value, v []byte) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*[]byte)(urv.ptr) = v
}

func rvSetString(rv reflect.Value, v string) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*string)(urv.ptr) = v
}

func rvSetBool(rv reflect.Value, v bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*bool)(urv.ptr) = v
}

func rvSetTime(rv reflect.Value, v time.Time) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*time.Time)(urv.ptr) = v
}

func rvSetFloat32(rv reflect.Value, v float32) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*float32)(urv.ptr) = v
}

func rvSetFloat64(rv reflect.Value, v float64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*float64)(urv.ptr) = v
}

func rvSetComplex64(rv reflect.Value, v complex64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*complex64)(urv.ptr) = v
}

func rvSetComplex128(rv reflect.Value, v complex128) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*complex128)(urv.ptr) = v
}

func rvSetInt(rv reflect.Value, v int) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int)(urv.ptr) = v
}

func rvSetInt8(rv reflect.Value, v int8) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int8)(urv.ptr) = v
}

func rvSetInt16(rv reflect.Value, v int16) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int16)(urv.ptr) = v
}

func rvSetInt32(rv reflect.Value, v int32) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int32)(urv.ptr) = v
}

func rvSetInt64(rv reflect.Value, v int64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*int64)(urv.ptr) = v
}

func rvSetUint(rv reflect.Value, v uint) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint)(urv.ptr) = v
}

func rvSetUintptr(rv reflect.Value, v uintptr) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uintptr)(urv.ptr) = v
}

func rvSetUint8(rv reflect.Value, v uint8) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint8)(urv.ptr) = v
}

func rvSetUint16(rv reflect.Value, v uint16) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint16)(urv.ptr) = v
}

func rvSetUint32(rv reflect.Value, v uint32) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint32)(urv.ptr) = v
}

func rvSetUint64(rv reflect.Value, v uint64) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	*(*uint64)(urv.ptr) = v
}

// ----------------

// rvSetZero is rv.Set(reflect.Zero(rv.Type())) for all kinds (including reflect.Interface).
func rvSetZero(rv reflect.Value) {
	rvSetDirectZero(rv)
}

func rvSetIntf(rv reflect.Value, v reflect.Value) {
	rv.Set(v)
}

// rvSetDirect is rv.Set for all kinds except reflect.Interface.
//
// Callers MUST NOT pass a value of kind reflect.Interface, as it may cause unexpected segfaults.
func rvSetDirect(rv reflect.Value, v reflect.Value) {
	// MARKER: rv.Set for kind reflect.Interface may do a separate allocation if a scalar value.
	// The book-keeping is onerous, so we just do the simple ones where a memmove is sufficient.
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if uv.flag&unsafeFlagIndir == 0 {
		*(*unsafe.Pointer)(urv.ptr) = uv.ptr
	} else if uv.ptr == unsafeZeroAddr {
		if urv.ptr != unsafeZeroAddr {
			typedmemclr(urv.typ, urv.ptr)
		}
	} else {
		typedmemmove(urv.typ, urv.ptr, uv.ptr)
	}
}
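// A minimal sketch (not in the original): copying one addressable,
// non-interface value into another via rvSetDirect.
//
//	var dst, src int = 0, 7
//	rvSetDirect(reflect.ValueOf(&dst).Elem(), reflect.ValueOf(&src).Elem())
//	// dst == 7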
// rvSetDirectZero is rv.Set(reflect.Zero(rv.Type())) for all kinds except reflect.Interface.
func rvSetDirectZero(rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if urv.ptr != unsafeZeroAddr {
		typedmemclr(urv.typ, urv.ptr)
	}
}

// rvMakeSlice updates the slice to point to a new array.
// It copies data from the old slice to the new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Value, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	s := unsafeSlice{newarray(t, xcap), xlen, xcap}
	if ux.Len > 0 {
		typedslicecopy(t, s, *ux)
	}
	*ux = s
	return rv, true
}

// rvSlice returns a sub-slice of the slice given a new length,
// without modifying the passed-in value.
// It is typically called when we know that SetLen(...) cannot be done.
func rvSlice(rv reflect.Value, length int) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	var x []struct{}
	ux := (*unsafeSlice)(unsafe.Pointer(&x))
	*ux = *(*unsafeSlice)(urv.ptr)
	ux.Len = length
	urv.ptr = unsafe.Pointer(ux)
	return rv
}

// rvGrowSlice updates the slice to point to a new array with the cap incremented, and len set to the new cap value.
// It copies data from the old slice to the new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	*ux = unsafeGrowslice(t, *ux, cap, incr)
	ux.Len = ux.Cap
	return rv, ux.Cap, true
}
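// A minimal sketch (not in the original): rvSlice shortens the view while
// sharing the backing array, and leaves the original value untouched.
//
//	s := []int{1, 2, 3, 4}
//	rv2 := rvSlice(reflect.ValueOf(s), 2)
//	// rv2.Len() == 2, len(s) == 4; both share one backing array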
// ------------

func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}

func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(reflect.Slice) | unsafeFlagIndir
	urv.ptr = unsafe.Pointer(&unsafeZeroSlice)
	return
}

func rvLenSlice(rv reflect.Value) int {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return (*unsafeSlice)(urv.ptr).Len
}

func rvCapSlice(rv reflect.Value) int {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return (*unsafeSlice)(urv.ptr).Cap
}

func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}

// if scratch is nil, then return a writable view (assuming canAddr=true)
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	bx := (*unsafeSlice)(unsafe.Pointer(&bs))
	bx.Data = urv.ptr
	bx.Len = rv.Len()
	bx.Cap = bx.Len
	return
}

func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
	// It is possible that this slice is based off an array with a larger
	// len than we want (where array len == slice cap).
	// However, it is ok to create an array type that is a subset of the full
	// array, e.g. the full slice is based off a *[16]byte, but we can create
	// a *[4]byte off of it. That is ok.
	//
	// Consequently, we use rvLenSlice, not rvCapSlice.
	t := reflectArrayOf(rvLenSlice(rv), rvType(rv).Elem())
	// v = rvZeroAddrK(t, reflect.Array)
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uintptr(reflect.Array) | unsafeFlagIndir | unsafeFlagAddr
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.
	return
}

func rvGetSlice4Array(rv reflect.Value, v interface{}) {
	// v is a pointer to a slice to be populated
	uv := (*unsafeIntf)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	s := (*unsafeSlice)(uv.ptr)
	s.Data = urv.ptr
	s.Len = rv.Len()
	s.Cap = s.Len
}

func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
	typedslicecopy((*unsafeIntf)(unsafe.Pointer(&elemType)).ptr,
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&dest)).ptr),
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&src)).ptr))
}
// ------------

func rvGetBool(rv reflect.Value) bool {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*bool)(v.ptr)
}

func rvGetBytes(rv reflect.Value) []byte {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*[]byte)(v.ptr)
}

func rvGetTime(rv reflect.Value) time.Time {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*time.Time)(v.ptr)
}

func rvGetString(rv reflect.Value) string {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*string)(v.ptr)
}

func rvGetFloat64(rv reflect.Value) float64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*float64)(v.ptr)
}

func rvGetFloat32(rv reflect.Value) float32 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*float32)(v.ptr)
}

func rvGetComplex64(rv reflect.Value) complex64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*complex64)(v.ptr)
}

func rvGetComplex128(rv reflect.Value) complex128 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*complex128)(v.ptr)
}

func rvGetInt(rv reflect.Value) int {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int)(v.ptr)
}

func rvGetInt8(rv reflect.Value) int8 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int8)(v.ptr)
}

func rvGetInt16(rv reflect.Value) int16 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int16)(v.ptr)
}

func rvGetInt32(rv reflect.Value) int32 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int32)(v.ptr)
}

func rvGetInt64(rv reflect.Value) int64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int64)(v.ptr)
}

func rvGetUint(rv reflect.Value) uint {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint)(v.ptr)
}

func rvGetUint8(rv reflect.Value) uint8 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint8)(v.ptr)
}

func rvGetUint16(rv reflect.Value) uint16 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint16)(v.ptr)
}

func rvGetUint32(rv reflect.Value) uint32 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint32)(v.ptr)
}

func rvGetUint64(rv reflect.Value) uint64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint64)(v.ptr)
}

func rvGetUintptr(rv reflect.Value) uintptr {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uintptr)(v.ptr)
}
func rvLenMap(rv reflect.Value) int {
	// maplen is not inlined, because as of go1.16beta, go:linkname's are not inlined.
	// thus, faster to call rv.Len() directly.
	//
	// MARKER: review after https://github.com/golang/go/issues/20019 is fixed.
	// return rv.Len()
	return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
}

// Note: it is hard to find len(...) of an array type,
// as that is a field in the arrayType representing the array, and hard to introspect.
//
// func rvLenArray(rv reflect.Value) int { return rv.Len() }

// ------------ map range and map indexing ----------

// regular calls to map via reflection: MapKeys, MapIndex, MapRange/MapIter etc
// will always allocate for each map key or value.
//
// It is more performant to provide a value that the map entry is set into,
// as that elides the allocation.

// go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
// hIter struct with the first 2 values being key and value
// of the current iteration.
//
// This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
// We bypass the reflect wrapper functions and just use the *hIter directly.
//
// Though *hIter has many fields, we only care about the first 2.
//
// We directly embed this in unsafeMapIter below.
//
// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
// so it fills multiple cache lines and can give some extra space to accommodate small growth.

type unsafeMapIter struct {
	mtyp, mptr unsafe.Pointer
	k, v       reflect.Value
	kisref     bool
	visref     bool
	mapvalues  bool
	done       bool
	started    bool
	_          [3]byte // padding
	it         struct {
		key   unsafe.Pointer
		value unsafe.Pointer
		_     [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
	}
}

func (t *unsafeMapIter) Next() (r bool) {
	if t == nil || t.done {
		return
	}
	if t.started {
		mapiternext((unsafe.Pointer)(&t.it))
	} else {
		t.started = true
	}
	t.done = t.it.key == nil
	if t.done {
		return
	}
	if helperUnsafeDirectAssignMapEntry || t.kisref {
		(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
	} else {
		k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
		typedmemmove(k.typ, k.ptr, t.it.key)
	}
	if t.mapvalues {
		if helperUnsafeDirectAssignMapEntry || t.visref {
			(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
		} else {
			v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
			typedmemmove(v.typ, v.ptr, t.it.value)
		}
	}
	return true
}

func (t *unsafeMapIter) Key() (r reflect.Value) {
	return t.k
}

func (t *unsafeMapIter) Value() (r reflect.Value) {
	return t.v
}

func (t *unsafeMapIter) Done() {}

type mapIter struct {
	unsafeMapIter
}

func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
	if rvIsNil(m) {
		t.done = true
		return
	}
	t.done = false
	t.started = false
	t.mapvalues = mapvalues
	// var urv *unsafeReflectValue
	urv := (*unsafeReflectValue)(unsafe.Pointer(&m))
	t.mtyp = urv.typ
	t.mptr = rvRefPtr(urv)
	// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
	mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))
	t.k = k
	t.kisref = refBitset.isset(byte(k.Kind()))
	if mapvalues {
		t.v = v
		t.visref = refBitset.isset(byte(v.Kind()))
	} else {
		t.v = reflect.Value{}
	}
}

// unsafeMapKVPtr returns the pointer if flagIndir, else it returns a pointer to the pointer.
// It is needed as maps always keep a reference to the underlying value.
func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
	if urv.flag&unsafeFlagIndir == 0 {
		return unsafe.Pointer(&urv.ptr)
	}
	return urv.ptr
}

// func mapDelete(m, k reflect.Value) {
// 	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
// 	var kptr = unsafeMapKVPtr(urv)
// 	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
// 	mapdelete(urv.typ, rv2ptr(urv), kptr)
// }

// mapAddrLoopvarRV returns an addressable reflect.Value that can be used in
// mapRange and mapGet operations.
//
// All calls to mapGet or mapRange will call here to get an addressable reflect.Value.
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	// return rvZeroAddrK(t, k)
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	// since we always set the ptr when helperUnsafeDirectAssignMapEntry=true,
	// we should only allocate if it is not true
	if !helperUnsafeDirectAssignMapEntry {
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
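// A minimal sketch (not in the original) of how encode might drive this
// iterator, assuming rvMap is a reflect.Value of kind map with key/elem types
// kt and vt; encKey and encVal are hypothetical consumers.
//
//	var it mapIter
//	k := mapAddrLoopvarRV(kt, kt.Kind())
//	v := mapAddrLoopvarRV(vt, vt.Kind())
//	mapRange(&it, rvMap, k, v, true)
//	for it.Next() {
//	    encKey(it.Key())
//	    encVal(it.Value())
//	}
//	it.Done()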
// ---------- ENCODER optimized ---------------

func (e *Encoder) jsondriver() *jsonEncDriver {
	return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
}

func (d *Decoder) zerocopystate() bool {
	return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
}

func (d *Decoder) stringZC(v []byte) (s string) {
	if d.zerocopystate() {
		return stringView(v)
	}
	return d.string(v)
}

func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
	if !d.zerocopystate() {
		*callFnRvk = true
		if d.decByteState == decByteStateReuseBuf {
			*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
			*kstr2bs = *kstrbs
		}
	}
	return stringView(*kstr2bs)
}

// ---------- DECODER optimized ---------------

func (d *Decoder) checkBreak() bool {
	// MARKER: jsonDecDriver.CheckBreak() costs over 80, and this isn't inlined.
	// Consequently, there's no benefit in incurring the cost of this
	// wrapping function checkBreak.
	//
	// It is faster to just call the interface method directly.

	// if d.js {
	// 	return d.jsondriver().CheckBreak()
	// }
	// if d.cbor {
	// 	return d.cbordriver().CheckBreak()
	// }
	return d.d.CheckBreak()
}

func (d *Decoder) jsondriver() *jsonDecDriver {
	return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
}

// ---------- structFieldInfo optimized ---------------

func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
	// we already know this is exported, and maybe embedded (based on what si says)
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// clear flagEmbedRO if necessary, and inherit permission bits from v
	urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
	urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
	return
}

// runtime chan and map are designed such that the first field is the count.
// The len builtin uses this to get the length of a chan/map easily.
// We leverage this knowledge here, since the maplen and chanlen functions from
// the runtime package are go:linkname'd here, and thus not inlined as of go1.16beta.

func len_map_chan(m unsafe.Pointer) int {
	if m == nil {
		return 0
	}
	return *((*int)(m))
}

func len_map(m unsafe.Pointer) int {
	// return maplen(m)
	return len_map_chan(m)
}

func len_chan(m unsafe.Pointer) int {
	// return chanlen(m)
	return len_map_chan(m)
}
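// A minimal sketch (not in the original): the count is the first word of the
// runtime map header, so len_map matches the len builtin.
//
//	m := map[string]int{"a": 1, "b": 2}
//	rv := reflect.ValueOf(m)
//	n := len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv)))) // n == len(m) == 2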
func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
	return mallocgc(rtsize2(typ), typ, true)
}

// ---------- go linknames (LINKED to runtime/reflect) ---------------

// MARKER: always check that these linknames match subsequent versions of go.
//
// Note that as of Jan 2021 (go 1.16 release), go:linkname(s) are not inlined
// outside of standard library use (e.g. within sync, reflect, etc).
// If these link'ed functions were normally inlined, calling them here would
// not necessarily give a performance boost, due to function call overhead.
//
// However, it seems most of these functions are not inlined anyway,
// as only maplen, chanlen and mapaccess are small enough to get inlined.
//
// We checked this by going into $GOROOT/src/runtime and running:
//   $ go build -tags codec.notfastpath -gcflags "-m=2"

// reflect.{unsafe_New, unsafe_NewArray} are not supported in gollvm,
// failing with an "error: undefined reference" error.
// However, runtime.{mallocgc, newarray} are supported, so we use those instead.

//go:linkname mallocgc runtime.mallocgc
//go:noescape
func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer

//go:linkname newarray runtime.newarray
//go:noescape
func newarray(typ unsafe.Pointer, n int) unsafe.Pointer

//go:linkname mapiterinit runtime.mapiterinit
//go:noescape
func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)

//go:linkname mapiternext runtime.mapiternext
//go:noescape
func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)

//go:linkname mapdelete runtime.mapdelete
//go:noescape
func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)

//go:linkname mapassign runtime.mapassign
//go:noescape
func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer

//go:linkname mapaccess2 runtime.mapaccess2
//go:noescape
func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)

// reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
// and if a writeBarrier is needed, before delegating to the right method in the runtime.
//
// This is why we use the functions in reflect, and not the ones in runtime directly.
// Calling runtime.XXX here will lead to memory issues.

//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src unsafeSlice) int

//go:linkname typedmemmove reflect.typedmemmove
//go:noescape
func typedmemmove(typ unsafe.Pointer, dst, src unsafe.Pointer)

//go:linkname typedmemclr reflect.typedmemclr
//go:noescape
func typedmemclr(typ unsafe.Pointer, dst unsafe.Pointer)