// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 ROHM Semiconductors
 *
 * ROHM/KIONIX KX022A accelerometer driver
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/units.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include "kionix-kx022a.h"

/*
 * The KX022A has a FIFO which can store 43 samples of HiRes data from all
 * 3 channels. This equals 43 (samples) * 3 (channels) * 2 (bytes/sample), or
 * 258 bytes of sample data. The quirk is that the number of bytes in the
 * FIFO is advertised via an 8-bit register (max value 255), so a completely
 * full FIFO (258 bytes) is reported using the max value 255.
 */
#define KX022A_FIFO_LENGTH 43
#define KX022A_FIFO_FULL_VALUE 255
#define KX022A_SOFT_RESET_WAIT_TIME_US (5 * USEC_PER_MSEC)
#define KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US (500 * USEC_PER_MSEC)

/* 3 axes, 2 bytes of data for each axis */
#define KX022A_FIFO_SAMPLES_SIZE_BYTES 6
#define KX022A_FIFO_MAX_BYTES \
	(KX022A_FIFO_LENGTH * KX022A_FIFO_SAMPLES_SIZE_BYTES)
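
/*
 * A compile-time cross-check of the arithmetic above (assuming
 * static_assert() is visible through the headers already included): the
 * full FIFO holds 43 samples * 6 bytes = 258 bytes, which is more than
 * the 8-bit buffer status register can report directly.
 */
static_assert(KX022A_FIFO_MAX_BYTES == 258);
static_assert(KX022A_FIFO_MAX_BYTES > KX022A_FIFO_FULL_VALUE);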

enum {
	KX022A_STATE_SAMPLE,
	KX022A_STATE_FIFO,
};

/* Regmap configs */
static const struct regmap_range kx022a_volatile_ranges[] = {
	{
		.range_min = KX022A_REG_XHP_L,
		.range_max = KX022A_REG_COTR,
	}, {
		.range_min = KX022A_REG_TSCP,
		.range_max = KX022A_REG_INT_REL,
	}, {
		/* The reset bit will be cleared by sensor */
		.range_min = KX022A_REG_CNTL2,
		.range_max = KX022A_REG_CNTL2,
	}, {
		.range_min = KX022A_REG_BUF_STATUS_1,
		.range_max = KX022A_REG_BUF_READ,
	},
};

static const struct regmap_access_table kx022a_volatile_regs = {
	.yes_ranges = &kx022a_volatile_ranges[0],
	.n_yes_ranges = ARRAY_SIZE(kx022a_volatile_ranges),
};

static const struct regmap_range kx022a_precious_ranges[] = {
	{
		.range_min = KX022A_REG_INT_REL,
		.range_max = KX022A_REG_INT_REL,
	},
};

static const struct regmap_access_table kx022a_precious_regs = {
	.yes_ranges = &kx022a_precious_ranges[0],
	.n_yes_ranges = ARRAY_SIZE(kx022a_precious_ranges),
};

/*
 * The HW does not set WHO_AM_I reg as read-only but we don't want to write it
 * so we still include it in the read-only ranges.
 */
static const struct regmap_range kx022a_read_only_ranges[] = {
	{
		.range_min = KX022A_REG_XHP_L,
		.range_max = KX022A_REG_INT_REL,
	}, {
		.range_min = KX022A_REG_BUF_STATUS_1,
		.range_max = KX022A_REG_BUF_STATUS_2,
	}, {
		.range_min = KX022A_REG_BUF_READ,
		.range_max = KX022A_REG_BUF_READ,
	},
};

static const struct regmap_access_table kx022a_ro_regs = {
	.no_ranges = &kx022a_read_only_ranges[0],
	.n_no_ranges = ARRAY_SIZE(kx022a_read_only_ranges),
};

static const struct regmap_range kx022a_write_only_ranges[] = {
	{
		.range_min = KX022A_REG_BTS_WUF_TH,
		.range_max = KX022A_REG_BTS_WUF_TH,
	}, {
		.range_min = KX022A_REG_MAN_WAKE,
		.range_max = KX022A_REG_MAN_WAKE,
	}, {
		.range_min = KX022A_REG_SELF_TEST,
		.range_max = KX022A_REG_SELF_TEST,
	}, {
		.range_min = KX022A_REG_BUF_CLEAR,
		.range_max = KX022A_REG_BUF_CLEAR,
	},
};

static const struct regmap_access_table kx022a_wo_regs = {
	.no_ranges = &kx022a_write_only_ranges[0],
	.n_no_ranges = ARRAY_SIZE(kx022a_write_only_ranges),
};

static const struct regmap_range kx022a_noinc_read_ranges[] = {
	{
		.range_min = KX022A_REG_BUF_READ,
		.range_max = KX022A_REG_BUF_READ,
	},
};

static const struct regmap_access_table kx022a_nir_regs = {
	.yes_ranges = &kx022a_noinc_read_ranges[0],
	.n_yes_ranges = ARRAY_SIZE(kx022a_noinc_read_ranges),
};

const struct regmap_config kx022a_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	.volatile_table = &kx022a_volatile_regs,
	.rd_table = &kx022a_wo_regs,
	.wr_table = &kx022a_ro_regs,
	.rd_noinc_table = &kx022a_nir_regs,
	.precious_table = &kx022a_precious_regs,
	.max_register = KX022A_MAX_REGISTER,
	.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_NS_GPL(kx022a_regmap, IIO_KX022A);
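
/*
 * The bus-specific front-ends are expected to create the regmap with the
 * config above and then hand over to kx022a_probe_internal(). Roughly, for
 * an I2C front-end:
 *
 *	regmap = devm_regmap_init_i2c(i2c, &kx022a_regmap);
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 *
 *	return kx022a_probe_internal(&i2c->dev);
 *
 * plus MODULE_IMPORT_NS(IIO_KX022A) in the front-end module to reach the
 * namespaced symbols exported here.
 */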

struct kx022a_data {
	struct regmap *regmap;
	struct iio_trigger *trig;
	struct device *dev;
	struct iio_mount_matrix orientation;
	int64_t timestamp, old_timestamp;

	int irq;
	int inc_reg;
	int ien_reg;

	unsigned int g_range;
	unsigned int state;
	unsigned int odr_ns;

	bool trigger_enabled;
	/*
	 * Prevent toggling the sensor standby/active state (PC1 bit) in the
	 * middle of a configuration, or when the FIFO is enabled. Also,
	 * protect the data stored/retrieved from this structure from
	 * concurrent accesses.
	 */
	struct mutex mutex;
	u8 watermark;

	/* 3 x 16bit accel data + timestamp */
	__le16 buffer[8] __aligned(IIO_DMA_MINALIGN);
	struct {
		__le16 channels[3];
		s64 ts __aligned(8);
	} scan;
};

static const struct iio_mount_matrix *
kx022a_get_mount_matrix(const struct iio_dev *idev,
			const struct iio_chan_spec *chan)
{
	struct kx022a_data *data = iio_priv(idev);

	return &data->orientation;
}

enum {
	AXIS_X,
	AXIS_Y,
	AXIS_Z,
	AXIS_MAX
};

static const unsigned long kx022a_scan_masks[] = {
	BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z), 0
};

static const struct iio_chan_spec_ext_info kx022a_ext_info[] = {
	IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kx022a_get_mount_matrix),
	{ }
};

#define KX022A_ACCEL_CHAN(axis, index) \
{ \
	.type = IIO_ACCEL, \
	.modified = 1, \
	.channel2 = IIO_MOD_##axis, \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
				    BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.info_mask_shared_by_type_available = \
				    BIT(IIO_CHAN_INFO_SCALE) | \
				    BIT(IIO_CHAN_INFO_SAMP_FREQ), \
	.ext_info = kx022a_ext_info, \
	.address = KX022A_REG_##axis##OUT_L, \
	.scan_index = index, \
	.scan_type = { \
		.sign = 's', \
		.realbits = 16, \
		.storagebits = 16, \
		.endianness = IIO_LE, \
	}, \
}

static const struct iio_chan_spec kx022a_channels[] = {
	KX022A_ACCEL_CHAN(X, 0),
	KX022A_ACCEL_CHAN(Y, 1),
	KX022A_ACCEL_CHAN(Z, 2),
	IIO_CHAN_SOFT_TIMESTAMP(3),
};

/*
 * The sensor HW can support ODR up to 1600 Hz, which is beyond what most
 * Linux CPUs can handle without dropping samples. Also, the low power mode is
 * not available for higher sample rates. Thus, the driver only supports 200 Hz
 * and slower ODRs. The slowest is 0.78 Hz.
 */
static const int kx022a_accel_samp_freq_table[][2] = {
	{ 0, 780000 },
	{ 1, 563000 },
	{ 3, 125000 },
	{ 6, 250000 },
	{ 12, 500000 },
	{ 25, 0 },
	{ 50, 0 },
	{ 100, 0 },
	{ 200, 0 },
};

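/*
 * Sample periods in nanoseconds, one entry per ODR in the table above
 * (roughly NSEC_PER_SEC / frequency). Used as the per-sample time step when
 * approximating timestamps for FIFO data.
 */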
static const unsigned int kx022a_odrs[] = {
	1282051282,
	639795266,
	320 * MEGA,
	160 * MEGA,
	80 * MEGA,
	40 * MEGA,
	20 * MEGA,
	10 * MEGA,
	5 * MEGA,
};

/*
 * The range is typically +/-2G/4G/8G/16G, distributed over the available bits.
 * The scale table can be calculated using
 *	(range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
 *	=> KX022A uses 16 bits (HiRes mode - assume the low 8 bits are zeroed
 *	in low-power mode(?) )
 *	=> +/-2G  => 4 / 2^16 * 9.80665 * 10^6 (scaled to micro)
 *	=> +/-2G  - 598.550415
 *	   +/-4G  - 1197.100830
 *	   +/-8G  - 2394.201660
 *	   +/-16G - 4788.403320
 */
static const int kx022a_scale_table[][2] = {
	{ 598, 550415 },
	{ 1197, 100830 },
	{ 2394, 201660 },
	{ 4788, 403320 },
};

static int kx022a_read_avail(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     const int **vals, int *type, int *length,
			     long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*vals = (const int *)kx022a_accel_samp_freq_table;
		*length = ARRAY_SIZE(kx022a_accel_samp_freq_table) *
			  ARRAY_SIZE(kx022a_accel_samp_freq_table[0]);
		*type = IIO_VAL_INT_PLUS_MICRO;
		return IIO_AVAIL_LIST;
	case IIO_CHAN_INFO_SCALE:
		*vals = (const int *)kx022a_scale_table;
		*length = ARRAY_SIZE(kx022a_scale_table) *
			  ARRAY_SIZE(kx022a_scale_table[0]);
		*type = IIO_VAL_INT_PLUS_MICRO;
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}

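/*
 * Initial sample period estimate (20 ms, i.e. 50 Hz), used until the ODR
 * gets configured via IIO_CHAN_INFO_SAMP_FREQ.
 */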
#define KX022A_DEFAULT_PERIOD_NS (20 * NSEC_PER_MSEC)

static void kx022a_reg2freq(unsigned int val, int *val1, int *val2)
{
	*val1 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][0];
	*val2 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][1];
}

static void kx022a_reg2scale(unsigned int val, unsigned int *val1,
			     unsigned int *val2)
{
	val &= KX022A_MASK_GSEL;
	val >>= KX022A_GSEL_SHIFT;

	*val1 = kx022a_scale_table[val][0];
	*val2 = kx022a_scale_table[val][1];
}

static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on)
{
	int ret;

	if (on)
		ret = regmap_set_bits(data->regmap, KX022A_REG_CNTL,
				      KX022A_MASK_PC1);
	else
		ret = regmap_clear_bits(data->regmap, KX022A_REG_CNTL,
					KX022A_MASK_PC1);
	if (ret)
		dev_err(data->dev, "Turn %s fail %d\n", str_on_off(on), ret);

	return ret;
}

static int kx022a_turn_off_lock(struct kx022a_data *data)
{
	int ret;

	mutex_lock(&data->mutex);
	ret = kx022a_turn_on_off_unlocked(data, false);
	if (ret)
		mutex_unlock(&data->mutex);

	return ret;
}

static int kx022a_turn_on_unlock(struct kx022a_data *data)
{
	int ret;

	ret = kx022a_turn_on_off_unlocked(data, true);
	mutex_unlock(&data->mutex);

	return ret;
}

static int kx022a_write_raw(struct iio_dev *idev,
			    struct iio_chan_spec const *chan,
			    int val, int val2, long mask)
{
	struct kx022a_data *data = iio_priv(idev);
	int ret, n;

	/*
	 * We should not allow changing the scale or the sampling frequency
	 * when the FIFO is running, as it would mess up the timestamp/scale
	 * of the samples already in the buffer. If this turns out to be an
	 * issue we can later change the logic to internally flush the FIFO
	 * before reconfiguring so that the samples in the FIFO keep matching
	 * the freq/scale settings. (Such a setup could cause issues if users
	 * trust the watermark to be reached within a known time-limit).
	 */
	ret = iio_device_claim_direct_mode(idev);
	if (ret)
		return ret;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		n = ARRAY_SIZE(kx022a_accel_samp_freq_table);

		while (n--)
			if (val == kx022a_accel_samp_freq_table[n][0] &&
			    val2 == kx022a_accel_samp_freq_table[n][1])
				break;
		if (n < 0) {
			ret = -EINVAL;
			goto unlock_out;
		}
		ret = kx022a_turn_off_lock(data);
		if (ret)
			break;

		ret = regmap_update_bits(data->regmap,
					 KX022A_REG_ODCNTL,
					 KX022A_MASK_ODR, n);
		data->odr_ns = kx022a_odrs[n];
		kx022a_turn_on_unlock(data);
		break;
	case IIO_CHAN_INFO_SCALE:
		n = ARRAY_SIZE(kx022a_scale_table);

		while (n-- > 0)
			if (val == kx022a_scale_table[n][0] &&
			    val2 == kx022a_scale_table[n][1])
				break;
		if (n < 0) {
			ret = -EINVAL;
			goto unlock_out;
		}

		ret = kx022a_turn_off_lock(data);
		if (ret)
			break;

		ret = regmap_update_bits(data->regmap, KX022A_REG_CNTL,
					 KX022A_MASK_GSEL,
					 n << KX022A_GSEL_SHIFT);
		kx022a_turn_on_unlock(data);
		break;
	default:
		ret = -EINVAL;
		break;
	}

unlock_out:
	iio_device_release_direct_mode(idev);

	return ret;
}

static int kx022a_fifo_set_wmi(struct kx022a_data *data)
{
	u8 threshold;

	threshold = data->watermark;

	return regmap_update_bits(data->regmap, KX022A_REG_BUF_CNTL1,
				  KX022A_MASK_WM_TH, threshold);
}

static int kx022a_get_axis(struct kx022a_data *data,
			   struct iio_chan_spec const *chan,
			   int *val)
{
	int ret;

	ret = regmap_bulk_read(data->regmap, chan->address, &data->buffer[0],
			       sizeof(__le16));
	if (ret)
		return ret;

	*val = le16_to_cpu(data->buffer[0]);

	return IIO_VAL_INT;
}

static int kx022a_read_raw(struct iio_dev *idev,
			   struct iio_chan_spec const *chan,
			   int *val, int *val2, long mask)
{
	struct kx022a_data *data = iio_priv(idev);
	unsigned int regval;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = iio_device_claim_direct_mode(idev);
		if (ret)
			return ret;

		mutex_lock(&data->mutex);
		ret = kx022a_get_axis(data, chan, val);
		mutex_unlock(&data->mutex);

		iio_device_release_direct_mode(idev);

		return ret;

	case IIO_CHAN_INFO_SAMP_FREQ:
		ret = regmap_read(data->regmap, KX022A_REG_ODCNTL, &regval);
		if (ret)
			return ret;

		if ((regval & KX022A_MASK_ODR) >=
		    ARRAY_SIZE(kx022a_accel_samp_freq_table)) {
			dev_err(data->dev, "Invalid ODR\n");
			return -EINVAL;
		}

		kx022a_reg2freq(regval, val, val2);

		return IIO_VAL_INT_PLUS_MICRO;

	case IIO_CHAN_INFO_SCALE:
		ret = regmap_read(data->regmap, KX022A_REG_CNTL, &regval);
		if (ret < 0)
			return ret;

		kx022a_reg2scale(regval, val, val2);

		return IIO_VAL_INT_PLUS_MICRO;
	}

	return -EINVAL;
}

static int kx022a_validate_trigger(struct iio_dev *idev,
				   struct iio_trigger *trig)
{
	struct kx022a_data *data = iio_priv(idev);

	if (data->trig != trig)
		return -EINVAL;

	return 0;
}

static int kx022a_set_watermark(struct iio_dev *idev, unsigned int val)
{
	struct kx022a_data *data = iio_priv(idev);

	if (val > KX022A_FIFO_LENGTH)
		val = KX022A_FIFO_LENGTH;

	mutex_lock(&data->mutex);
	data->watermark = val;
	mutex_unlock(&data->mutex);

	return 0;
}

static ssize_t hwfifo_enabled_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *idev = dev_to_iio_dev(dev);
	struct kx022a_data *data = iio_priv(idev);
	bool state;

	mutex_lock(&data->mutex);
	state = data->state;
	mutex_unlock(&data->mutex);

	return sysfs_emit(buf, "%d\n", state);
}

static ssize_t hwfifo_watermark_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *idev = dev_to_iio_dev(dev);
	struct kx022a_data *data = iio_priv(idev);
	int wm;

	mutex_lock(&data->mutex);
	wm = data->watermark;
	mutex_unlock(&data->mutex);

	return sysfs_emit(buf, "%d\n", wm);
}

static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0);
static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0);

static const struct iio_dev_attr *kx022a_fifo_attributes[] = {
	&iio_dev_attr_hwfifo_watermark,
	&iio_dev_attr_hwfifo_enabled,
	NULL
};

static int kx022a_drop_fifo_contents(struct kx022a_data *data)
{
	/*
	 * We must clear the old time-stamp to avoid computing the timestamps
	 * based on samples acquired when buffer was last enabled.
	 *
	 * We don't need to protect the timestamp as long as we are only
	 * called from fifo-disable where we can guarantee the sensor is not
	 * triggering interrupts and where the mutex is locked to prevent the
	 * user-space access.
	 */
	data->timestamp = 0;

	return regmap_write(data->regmap, KX022A_REG_BUF_CLEAR, 0x0);
}

static int __kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples,
			       bool irq)
{
	struct kx022a_data *data = iio_priv(idev);
	struct device *dev = regmap_get_device(data->regmap);
	__le16 buffer[KX022A_FIFO_LENGTH * 3];
	uint64_t sample_period;
	int count, fifo_bytes;
	bool renable = false;
	int64_t tstamp;
	int ret, i;

	ret = regmap_read(data->regmap, KX022A_REG_BUF_STATUS_1, &fifo_bytes);
	if (ret) {
		dev_err(dev, "Error reading buffer status\n");
		return ret;
	}

	/* Let's not overflow if we for some reason get a bogus value from I2C */
	if (fifo_bytes == KX022A_FIFO_FULL_VALUE)
		fifo_bytes = KX022A_FIFO_MAX_BYTES;

	if (fifo_bytes % KX022A_FIFO_SAMPLES_SIZE_BYTES)
		dev_warn(data->dev, "Bad FIFO alignment. Data may be corrupt\n");

	count = fifo_bytes / KX022A_FIFO_SAMPLES_SIZE_BYTES;
	if (!count)
		return 0;

	/*
	 * If we are being called from the IRQ handler we know the stored
	 * timestamp is fairly accurate for the last stored sample. Otherwise,
	 * if we are called as a result of a read operation from userspace and
	 * hence before the watermark interrupt was triggered, take a timestamp
	 * now. We can fall anywhere in between two samples so the error in
	 * this case is at most one sample period.
	 */
	if (!irq) {
		/*
		 * We need to have the IRQ disabled or we risk messing up
		 * the timestamps. If we are run from the IRQ, then the
		 * IRQF_ONESHOT has us covered - but if we are run by a
		 * user-space read we need to disable the IRQ to be on the
		 * safe side. We do this using synchronous disable so that if
		 * the IRQ thread is being run on another CPU we wait for it
		 * to finish.
		 */
		disable_irq(data->irq);
		renable = true;

		data->old_timestamp = data->timestamp;
		data->timestamp = iio_get_time_ns(idev);
	}

	/*
	 * Approximate the timestamps for each of the samples based on the
	 * sampling frequency, the timestamp of the last sample and the number
	 * of samples.
	 *
	 * We'd better not use the current bandwidth settings to compute the
	 * sample period. The real sample rate varies with the device and
	 * small variations add up when we store a large number of samples.
	 *
	 * To avoid this issue we compute the actual sample period ourselves
	 * based on the timestamp delta between the last two flush operations.
	 */
	if (data->old_timestamp) {
		sample_period = data->timestamp - data->old_timestamp;
		do_div(sample_period, count);
	} else {
		sample_period = data->odr_ns;
	}
	tstamp = data->timestamp - (count - 1) * sample_period;
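
	/*
	 * For example: if the previous flush happened 100 ms ago and five
	 * samples are now pending, sample_period becomes 20 ms and the first
	 * sample gets stamped 4 * 20 ms = 80 ms before data->timestamp.
	 */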

	if (samples && count > samples) {
		/*
		 * Here we leave some old samples in the buffer. We need to
		 * adjust the timestamp to match the first sample in the buffer
		 * or we will miscalculate the sample_period on the next round.
		 */
		data->timestamp -= (count - samples) * sample_period;
		count = samples;
	}

	fifo_bytes = count * KX022A_FIFO_SAMPLES_SIZE_BYTES;
	ret = regmap_noinc_read(data->regmap, KX022A_REG_BUF_READ,
				&buffer[0], fifo_bytes);
	if (ret)
		goto renable_out;

	for (i = 0; i < count; i++) {
		__le16 *sam = &buffer[i * 3];
		__le16 *chs;
		int bit;

		chs = &data->scan.channels[0];
		for_each_set_bit(bit, idev->active_scan_mask, AXIS_MAX)
			chs[bit] = sam[bit];

		iio_push_to_buffers_with_timestamp(idev, &data->scan, tstamp);

		tstamp += sample_period;
	}

	ret = count;

renable_out:
	if (renable)
		enable_irq(data->irq);

	return ret;
}

static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
{
	struct kx022a_data *data = iio_priv(idev);
	int ret;

	mutex_lock(&data->mutex);
	ret = __kx022a_fifo_flush(idev, samples, false);
	mutex_unlock(&data->mutex);

	return ret;
}

static const struct iio_info kx022a_info = {
	.read_raw = &kx022a_read_raw,
	.write_raw = &kx022a_write_raw,
	.read_avail = &kx022a_read_avail,

	.validate_trigger = kx022a_validate_trigger,
	.hwfifo_set_watermark = kx022a_set_watermark,
	.hwfifo_flush_to_buffer = kx022a_fifo_flush,
};

static int kx022a_set_drdy_irq(struct kx022a_data *data, bool en)
{
	if (en)
		return regmap_set_bits(data->regmap, KX022A_REG_CNTL,
				       KX022A_MASK_DRDY);

	return regmap_clear_bits(data->regmap, KX022A_REG_CNTL,
				 KX022A_MASK_DRDY);
}

static int kx022a_prepare_irq_pin(struct kx022a_data *data)
{
	/* Enable the used IRQ pin. Set polarity to active low */
	int mask = KX022A_MASK_IEN | KX022A_MASK_IPOL |
		   KX022A_MASK_ITYP;
	int val = KX022A_MASK_IEN | KX022A_IPOL_LOW |
		  KX022A_ITYP_LEVEL;
	int ret;

	ret = regmap_update_bits(data->regmap, data->inc_reg, mask, val);
	if (ret)
		return ret;

	/* We enable WMI to the IRQ pin only at buffer_enable */
	mask = KX022A_MASK_INS2_DRDY;

	return regmap_set_bits(data->regmap, data->ien_reg, mask);
}

static int kx022a_fifo_disable(struct kx022a_data *data)
{
	int ret = 0;

	ret = kx022a_turn_off_lock(data);
	if (ret)
		return ret;

	ret = regmap_clear_bits(data->regmap, data->ien_reg, KX022A_MASK_WMI);
	if (ret)
		goto unlock_out;

	ret = regmap_clear_bits(data->regmap, KX022A_REG_BUF_CNTL2,
				KX022A_MASK_BUF_EN);
	if (ret)
		goto unlock_out;

	data->state &= ~KX022A_STATE_FIFO;

	kx022a_drop_fifo_contents(data);

	return kx022a_turn_on_unlock(data);

unlock_out:
	mutex_unlock(&data->mutex);

	return ret;
}

static int kx022a_buffer_predisable(struct iio_dev *idev)
{
	struct kx022a_data *data = iio_priv(idev);

	if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
		return 0;

	return kx022a_fifo_disable(data);
}

static int kx022a_fifo_enable(struct kx022a_data *data)
{
	int ret;

	ret = kx022a_turn_off_lock(data);
	if (ret)
		return ret;

	/* Write the watermark to the HW */
	ret = kx022a_fifo_set_wmi(data);
	if (ret)
		goto unlock_out;

	/* Enable the buffer */
	ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2,
			      KX022A_MASK_BUF_EN);
	if (ret)
		goto unlock_out;

	data->state |= KX022A_STATE_FIFO;
	ret = regmap_set_bits(data->regmap, data->ien_reg,
			      KX022A_MASK_WMI);
	if (ret)
		goto unlock_out;

	return kx022a_turn_on_unlock(data);

unlock_out:
	mutex_unlock(&data->mutex);

	return ret;
}

static int kx022a_buffer_postenable(struct iio_dev *idev)
{
	struct kx022a_data *data = iio_priv(idev);

	/*
	 * If we use the data-ready trigger, then the IRQ masks are handled by
	 * the trigger enable and the hardware buffer is not used - we just
	 * push results to the IIO FIFO whenever data-ready triggers.
	 */
	if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
		return 0;

	return kx022a_fifo_enable(data);
}

static const struct iio_buffer_setup_ops kx022a_buffer_ops = {
	.postenable = kx022a_buffer_postenable,
	.predisable = kx022a_buffer_predisable,
};

static irqreturn_t kx022a_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *idev = pf->indio_dev;
	struct kx022a_data *data = iio_priv(idev);
	int ret;

	ret = regmap_bulk_read(data->regmap, KX022A_REG_XOUT_L, data->buffer,
			       KX022A_FIFO_SAMPLES_SIZE_BYTES);
	if (ret < 0)
		goto err_read;

	iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp);
err_read:
	iio_trigger_notify_done(idev->trig);

	return IRQ_HANDLED;
}

/* Get timestamps and wake the thread if we need to read data */
static irqreturn_t kx022a_irq_handler(int irq, void *private)
{
	struct iio_dev *idev = private;
	struct kx022a_data *data = iio_priv(idev);

	data->old_timestamp = data->timestamp;
	data->timestamp = iio_get_time_ns(idev);

	if (data->state & KX022A_STATE_FIFO || data->trigger_enabled)
		return IRQ_WAKE_THREAD;

	return IRQ_NONE;
}

/*
 * WMI and data-ready IRQs are acked when results are read. If we add
 * TILT/WAKE or other IRQs - then we may need to implement the acking
 * (which is racy).
 */
static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
{
	struct iio_dev *idev = private;
	struct kx022a_data *data = iio_priv(idev);
	irqreturn_t ret = IRQ_NONE;

	mutex_lock(&data->mutex);

	if (data->trigger_enabled) {
		iio_trigger_poll_chained(data->trig);
		ret = IRQ_HANDLED;
	}

	if (data->state & KX022A_STATE_FIFO) {
		int ok;

		ok = __kx022a_fifo_flush(idev, KX022A_FIFO_LENGTH, true);
		if (ok > 0)
			ret = IRQ_HANDLED;
	}

	mutex_unlock(&data->mutex);

	return ret;
}

static int kx022a_trigger_set_state(struct iio_trigger *trig,
				    bool state)
{
	struct kx022a_data *data = iio_trigger_get_drvdata(trig);
	int ret = 0;

	mutex_lock(&data->mutex);

	if (data->trigger_enabled == state)
		goto unlock_out;

	if (data->state & KX022A_STATE_FIFO) {
		dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = kx022a_turn_on_off_unlocked(data, false);
	if (ret)
		goto unlock_out;

	data->trigger_enabled = state;
	ret = kx022a_set_drdy_irq(data, state);
	if (ret)
		goto unlock_out;

	ret = kx022a_turn_on_off_unlocked(data, true);

unlock_out:
	mutex_unlock(&data->mutex);

	return ret;
}

static const struct iio_trigger_ops kx022a_trigger_ops = {
	.set_trigger_state = kx022a_trigger_set_state,
};

static int kx022a_chip_init(struct kx022a_data *data)
{
	int ret, val;

	/* Reset the sensor */
	ret = regmap_write(data->regmap, KX022A_REG_CNTL2, KX022A_MASK_SRST);
	if (ret)
		return ret;

	/*
	 * I've seen I2C read failures if we poll too fast after the sensor
	 * reset. A slight delay gives the I2C block time to recover.
	 */
	msleep(1);

	ret = regmap_read_poll_timeout(data->regmap, KX022A_REG_CNTL2, val,
				       !(val & KX022A_MASK_SRST),
				       KX022A_SOFT_RESET_WAIT_TIME_US,
				       KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US);
	if (ret) {
		dev_err(data->dev, "Sensor reset %s\n",
			val & KX022A_MASK_SRST ? "timeout" : "fail");
		return ret;
	}

	ret = regmap_reinit_cache(data->regmap, &kx022a_regmap);
	if (ret) {
		dev_err(data->dev, "Failed to reinit reg cache\n");
		return ret;
	}

	/* Set the data resolution to 16 bits */
	ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2,
			      KX022A_MASK_BRES16);
	if (ret) {
		dev_err(data->dev, "Failed to set data resolution\n");
		return ret;
	}

	return kx022a_prepare_irq_pin(data);
}

int kx022a_probe_internal(struct device *dev)
{
	static const char * const regulator_names[] = {"io-vdd", "vdd"};
	struct iio_trigger *indio_trig;
	struct fwnode_handle *fwnode;
	struct kx022a_data *data;
	struct regmap *regmap;
	unsigned int chip_id;
	struct iio_dev *idev;
	int ret, irq;
	char *name;

	regmap = dev_get_regmap(dev, NULL);
	if (!regmap) {
		dev_err(dev, "no regmap\n");
		return -EINVAL;
	}

	fwnode = dev_fwnode(dev);
	if (!fwnode)
		return -ENODEV;

	idev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!idev)
		return -ENOMEM;

	data = iio_priv(idev);

	/*
	 * VDD is the analog and digital domain voltage supply and
	 * IO_VDD is the digital I/O voltage supply.
	 */
	ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
					     regulator_names);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "failed to enable regulator\n");

	ret = regmap_read(regmap, KX022A_REG_WHO, &chip_id);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to access sensor\n");

	if (chip_id != KX022A_ID) {
		dev_err(dev, "unsupported device 0x%x\n", chip_id);
		return -EINVAL;
	}

	irq = fwnode_irq_get_byname(fwnode, "INT1");
	if (irq > 0) {
		data->inc_reg = KX022A_REG_INC1;
		data->ien_reg = KX022A_REG_INC4;
	} else {
		irq = fwnode_irq_get_byname(fwnode, "INT2");
		if (irq <= 0)
			return dev_err_probe(dev, irq, "No suitable IRQ\n");

		data->inc_reg = KX022A_REG_INC5;
		data->ien_reg = KX022A_REG_INC6;
	}

	data->regmap = regmap;
	data->dev = dev;
	data->irq = irq;
	data->odr_ns = KX022A_DEFAULT_PERIOD_NS;
	mutex_init(&data->mutex);

	idev->channels = kx022a_channels;
	idev->num_channels = ARRAY_SIZE(kx022a_channels);
	idev->name = "kx022-accel";
	idev->info = &kx022a_info;
	idev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
	idev->available_scan_masks = kx022a_scan_masks;

	/* Read the mounting matrix, if present */
	ret = iio_read_mount_matrix(dev, &data->orientation);
	if (ret)
		return ret;

	/* The sensor must be turned off for configuration */
	ret = kx022a_turn_off_lock(data);
	if (ret)
		return ret;

	ret = kx022a_chip_init(data);
	if (ret) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	ret = kx022a_turn_on_unlock(data);
	if (ret)
		return ret;

	ret = devm_iio_triggered_buffer_setup_ext(dev, idev,
						  &iio_pollfunc_store_time,
						  kx022a_trigger_handler,
						  IIO_BUFFER_DIRECTION_IN,
						  &kx022a_buffer_ops,
						  kx022a_fifo_attributes);
	if (ret)
		return dev_err_probe(data->dev, ret,
				     "iio_triggered_buffer_setup_ext FAIL\n");

	indio_trig = devm_iio_trigger_alloc(dev, "%sdata-rdy-dev%d", idev->name,
					    iio_device_id(idev));
	if (!indio_trig)
		return -ENOMEM;

	data->trig = indio_trig;

	indio_trig->ops = &kx022a_trigger_ops;
	iio_trigger_set_drvdata(indio_trig, data);

	/*
	 * No need to check for NULL. request_threaded_irq() defaults to
	 * dev_name() should the alloc fail.
	 */
	name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-kx022a",
			      dev_name(data->dev));

	ret = devm_request_threaded_irq(data->dev, irq, kx022a_irq_handler,
					&kx022a_irq_thread_handler,
					IRQF_ONESHOT, name, idev);
	if (ret)
		return dev_err_probe(data->dev, ret, "Could not request IRQ\n");

	ret = devm_iio_trigger_register(dev, indio_trig);
	if (ret)
		return dev_err_probe(data->dev, ret,
				     "Trigger registration failed\n");

	ret = devm_iio_device_register(data->dev, idev);
	if (ret < 0)
		return dev_err_probe(dev, ret,
				     "Unable to register iio device\n");

	return ret;
}
EXPORT_SYMBOL_NS_GPL(kx022a_probe_internal, IIO_KX022A);

MODULE_DESCRIPTION("ROHM/Kionix KX022A accelerometer driver");
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_LICENSE("GPL");