Lines Matching refs:nvm
34 int (*read_version)(struct tb_nvm *nvm);
35 int (*validate)(struct tb_nvm *nvm);
36 int (*write_headers)(struct tb_nvm *nvm);
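The three members matched at lines 34-36 belong to struct tb_nvm_vendor_ops (the type name shows up below at line 352). A minimal sketch of how such a per-vendor table is declared and wired to the handlers matched further down; the variable name intel_switch_nvm_ops is illustrative, not taken from the matches:

struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);	/* fill major/minor/active_size */
	int (*validate)(struct tb_nvm *nvm);		/* optional: check the uploaded image */
	int (*write_headers)(struct tb_nvm *nvm);	/* optional: write vendor headers */
};

/* Illustrative per-vendor table pointing at the Intel switch handlers below. */
static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version	= intel_switch_nvm_version,
	.validate	= intel_switch_nvm_validate,
	.write_headers	= intel_switch_nvm_write_headers,
};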
52 static int intel_switch_nvm_version(struct tb_nvm *nvm) in intel_switch_nvm_version() argument
54 struct tb_switch *sw = tb_to_switch(nvm->dev); in intel_switch_nvm_version()
78 nvm->major = (val >> 16) & 0xff; in intel_switch_nvm_version()
79 nvm->minor = (val >> 8) & 0xff; in intel_switch_nvm_version()
80 nvm->active_size = nvm_size; in intel_switch_nvm_version()
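Lines 78-80 decode the Intel version register: the major lives in bits 23:16 and the minor in bits 15:8 of the 32-bit readout. A self-contained user-space check of that decode (the register value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x00360100;			/* made-up register readout */
	unsigned int major = (val >> 16) & 0xff;	/* bits 23:16 */
	unsigned int minor = (val >> 8) & 0xff;		/* bits 15:8 */

	printf("NVM version %u.%u\n", major, minor);	/* prints "NVM version 54.1" */
	return 0;
}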
85 static int intel_switch_nvm_validate(struct tb_nvm *nvm) in intel_switch_nvm_validate() argument
87 struct tb_switch *sw = tb_to_switch(nvm->dev); in intel_switch_nvm_validate()
90 u8 *buf = nvm->buf; in intel_switch_nvm_validate()
92 image_size = nvm->buf_data_size; in intel_switch_nvm_validate()
126 nvm->buf_data_start = buf + hdr_size; in intel_switch_nvm_validate()
127 nvm->buf_data_size = image_size - hdr_size; in intel_switch_nvm_validate()
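Lines 126-127 are the point of intel_switch_nvm_validate: once the image header has been checked, only the payload after it is exposed for flashing. The shape of such a validate hook, sketched with a made-up fixed header size rather than the real Intel image format:

static int example_nvm_validate(struct tb_nvm *nvm)
{
	unsigned int image_size = nvm->buf_data_size;	/* bytes staged so far */
	unsigned int hdr_size = EXAMPLE_HDR_SIZE;	/* hypothetical constant */
	u8 *buf = nvm->buf;

	if (image_size <= hdr_size)
		return -EINVAL;		/* image too small to hold a payload */

	/* Skip the header: only the payload gets written to the device. */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;
	return 0;
}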
132 static int intel_switch_nvm_write_headers(struct tb_nvm *nvm) in intel_switch_nvm_write_headers() argument
134 struct tb_switch *sw = tb_to_switch(nvm->dev); in intel_switch_nvm_write_headers()
141 DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS, in intel_switch_nvm_write_headers()
156 static int asmedia_switch_nvm_version(struct tb_nvm *nvm) in asmedia_switch_nvm_version() argument
158 struct tb_switch *sw = tb_to_switch(nvm->dev); in asmedia_switch_nvm_version()
166 nvm->major = (val << 16) & 0xff0000; in asmedia_switch_nvm_version()
167 nvm->major |= val & 0x00ff00; in asmedia_switch_nvm_version()
168 nvm->major |= (val >> 16) & 0x0000ff; in asmedia_switch_nvm_version()
174 nvm->minor = (val << 16) & 0xff0000; in asmedia_switch_nvm_version()
175 nvm->minor |= val & 0x00ff00; in asmedia_switch_nvm_version()
176 nvm->minor |= (val >> 16) & 0x0000ff; in asmedia_switch_nvm_version()
179 nvm->active_size = SZ_512K; in asmedia_switch_nvm_version()
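The mask-and-shift sequence at lines 166-168 (repeated for the minor at 174-176) is a 24-bit byte swap: byte 2 and byte 0 of the readout change places while byte 1 stays put. A standalone check with a made-up value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x00112233;	/* made-up register readout */
	uint32_t ver;

	ver  = (val << 16) & 0xff0000;	/* byte 0 -> byte 2 */
	ver |= val & 0x00ff00;		/* byte 1 stays */
	ver |= (val >> 16) & 0x0000ff;	/* byte 2 -> byte 0 */

	printf("0x%06x\n", ver);	/* prints 0x332211 */
	return 0;
}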
195 static int intel_retimer_nvm_version(struct tb_nvm *nvm) in intel_retimer_nvm_version() argument
197 struct tb_retimer *rt = tb_to_retimer(nvm->dev); in intel_retimer_nvm_version()
205 nvm->major = (val >> 16) & 0xff; in intel_retimer_nvm_version()
206 nvm->minor = (val >> 8) & 0xff; in intel_retimer_nvm_version()
214 nvm->active_size = nvm_size; in intel_retimer_nvm_version()
219 static int intel_retimer_nvm_validate(struct tb_nvm *nvm) in intel_retimer_nvm_validate() argument
221 struct tb_retimer *rt = tb_to_retimer(nvm->dev); in intel_retimer_nvm_validate()
223 u8 *buf = nvm->buf; in intel_retimer_nvm_validate()
226 image_size = nvm->buf_data_size; in intel_retimer_nvm_validate()
257 nvm->buf_data_start = buf + hdr_size; in intel_retimer_nvm_validate()
258 nvm->buf_data_size = image_size - hdr_size; in intel_retimer_nvm_validate()
284 struct tb_nvm *nvm; in tb_nvm_alloc() local
325 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); in tb_nvm_alloc()
326 if (!nvm) in tb_nvm_alloc()
331 kfree(nvm); in tb_nvm_alloc()
335 nvm->id = ret; in tb_nvm_alloc()
336 nvm->dev = dev; in tb_nvm_alloc()
337 nvm->vops = vops; in tb_nvm_alloc()
339 return nvm; in tb_nvm_alloc()
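The tb_nvm_alloc matches at lines 284-339 trace the whole constructor: allocate the object, grab an id from nvm_ida (the paired ida_simple_get is inferred from the ida_simple_remove at line 527), and record the owning device plus the vendor ops chosen for it. A hedged reconstruction of that sequence; the ERR_PTR error style and the vendor lookup are assumptions:

	struct tb_nvm *nvm;
	int ret;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);	/* inferred pairing */
	if (ret < 0) {
		kfree(nvm);
		return ERR_PTR(ret);
	}

	nvm->id = ret;		/* id handed to the nvmem configs below */
	nvm->dev = dev;		/* switch or retimer device */
	nvm->vops = vops;	/* vendor table picked for this device */

	return nvm;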
350 int tb_nvm_read_version(struct tb_nvm *nvm) in tb_nvm_read_version() argument
352 const struct tb_nvm_vendor_ops *vops = nvm->vops; in tb_nvm_read_version()
355 return vops->read_version(nvm); in tb_nvm_read_version()
371 int tb_nvm_validate(struct tb_nvm *nvm) in tb_nvm_validate() argument
373 const struct tb_nvm_vendor_ops *vops = nvm->vops; in tb_nvm_validate()
375 u8 *buf = nvm->buf; in tb_nvm_validate()
383 image_size = nvm->buf_data_size; in tb_nvm_validate()
391 nvm->buf_data_start = buf; in tb_nvm_validate()
393 return vops->validate ? vops->validate(nvm) : 0; in tb_nvm_validate()
406 int tb_nvm_write_headers(struct tb_nvm *nvm) in tb_nvm_write_headers() argument
408 const struct tb_nvm_vendor_ops *vops = nvm->vops; in tb_nvm_write_headers()
410 return vops->write_headers ? vops->write_headers(nvm) : 0; in tb_nvm_write_headers()
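Lines 371-410 are the image-side half of an update: after userspace has streamed a new image into nvm->buf, the driver validates it (line 391 defaults buf_data_start to the start of the buffer before the optional vendor hook may move it) and writes any vendor headers; only then is buf_data_start/buf_data_size pushed to the device. A hedged sketch of that calling order; example_flush() is made up for illustration:

static int example_flash_image(struct tb_nvm *nvm)
{
	int ret;

	ret = tb_nvm_validate(nvm);		/* size checks + vendor validate */
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(nvm);	/* vendor headers, if any */
	if (ret)
		return ret;

	/* Hypothetical transport write of the validated payload. */
	return example_flush(nvm->buf_data_start, nvm->buf_data_size);
}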
424 int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read) in tb_nvm_add_active() argument
434 config.id = nvm->id; in tb_nvm_add_active()
437 config.size = nvm->active_size; in tb_nvm_add_active()
438 config.dev = nvm->dev; in tb_nvm_add_active()
440 config.priv = nvm; in tb_nvm_add_active()
446 nvm->active = nvmem; in tb_nvm_add_active()
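tb_nvm_add_active (lines 424-446) publishes the active NVM contents as an nvmem device: the config carries the id, the size discovered by the read_version hook, the parent device, a priv pointer back to the nvm and the caller-supplied reg_read. A sketch of what that registration amounts to; the read_only flag and the error handling are assumptions beyond the matched lines:

	struct nvmem_config config = { };
	struct nvmem_device *nvmem;

	config.id = nvm->id;
	config.read_only = true;		/* assumption: active side is read-only */
	config.size = nvm->active_size;		/* filled in by the read_version hook */
	config.dev = nvm->dev;
	config.reg_read = reg_read;		/* transport-specific reader */
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->active = nvmem;
	return 0;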
461 int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val, in tb_nvm_write_buf() argument
464 if (!nvm->buf) { in tb_nvm_write_buf()
465 nvm->buf = vmalloc(NVM_MAX_SIZE); in tb_nvm_write_buf()
466 if (!nvm->buf) in tb_nvm_write_buf()
470 nvm->flushed = false; in tb_nvm_write_buf()
471 nvm->buf_data_size = offset + bytes; in tb_nvm_write_buf()
472 memcpy(nvm->buf + offset, val, bytes); in tb_nvm_write_buf()
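tb_nvm_write_buf (lines 461-472) never touches the hardware: the first write lazily vmallocs an NVM_MAX_SIZE staging buffer, each chunk is copied at its offset, and buf_data_size tracks the end of the latest chunk, which assumes the image arrives as one sequential stream from offset 0. A sketch of a non-active nvmem write callback sitting on top of it; the callback name is hypothetical:

static int example_nvm_non_active_write(void *priv, unsigned int offset,
					void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;	/* config.priv set at registration */

	/* Only stage the bytes; nothing reaches the device until the
	 * update is validated and flushed later. */
	return tb_nvm_write_buf(nvm, offset, val, bytes);
}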
489 int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write) in tb_nvm_add_non_active() argument
499 config.id = nvm->id; in tb_nvm_add_non_active()
503 config.dev = nvm->dev; in tb_nvm_add_non_active()
505 config.priv = nvm; in tb_nvm_add_non_active()
511 nvm->non_active = nvmem; in tb_nvm_add_non_active()
521 void tb_nvm_free(struct tb_nvm *nvm) in tb_nvm_free() argument
523 if (nvm) { in tb_nvm_free()
524 nvmem_unregister(nvm->non_active); in tb_nvm_free()
525 nvmem_unregister(nvm->active); in tb_nvm_free()
526 vfree(nvm->buf); in tb_nvm_free()
527 ida_simple_remove(&nvm_ida, nvm->id); in tb_nvm_free()
529 kfree(nvm); in tb_nvm_free()
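tb_nvm_free (lines 521-529) unwinds everything in reverse: both nvmem devices are unregistered, the staging buffer is freed, the id returned to nvm_ida, and the NULL check at line 523 makes it safe on any error path. A hedged usage sketch pairing it with the helpers above; tb_nvm_alloc's full signature is not among the matches, so taking the device pointer is an assumption, the reader callback is made up, and sw stands in for the owning switch:

	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&sw->dev);		/* signature assumed */
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = tb_nvm_read_version(nvm);		/* major/minor/active_size */
	if (ret)
		goto err_free;

	ret = tb_nvm_add_active(nvm, example_active_read);	/* made-up reader */
	if (ret)
		goto err_free;

	ret = tb_nvm_add_non_active(nvm, example_nvm_non_active_write);
	if (ret)
		goto err_free;

	sw->nvm = nvm;
	return 0;

err_free:
	tb_nvm_free(nvm);	/* NULL-safe; unregisters whatever got registered */
	return ret;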