This is a test repository.
Revision | 0a85ed6e7fce8075bb3090f8eac05ca1000f5969 (tree) |
---|---|
Time | 2020-05-11 03:16:07 |
Author | Linus Torvalds <torvalds@linu...> |
Committer | Linus Torvalds |
Merge tag 'block-5.7-2020-05-09' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
* tag 'block-5.7-2020-05-09' of git://git.kernel.dk/linux-block:
@@ -123,6 +123,7 @@ | ||
123 | 123 | #include <linux/ioprio.h> |
124 | 124 | #include <linux/sbitmap.h> |
125 | 125 | #include <linux/delay.h> |
126 | +#include <linux/backing-dev.h> | |
126 | 127 | |
127 | 128 | #include "blk.h" |
128 | 129 | #include "blk-mq.h" |
@@ -4976,8 +4977,9 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) | ||
4976 | 4977 | ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); |
4977 | 4978 | switch (ioprio_class) { |
4978 | 4979 | default: |
4979 | - dev_err(bfqq->bfqd->queue->backing_dev_info->dev, | |
4980 | - "bfq: bad prio class %d\n", ioprio_class); | |
4980 | + pr_err("bdi %s: bfq: bad prio class %d\n", | |
4981 | + bdi_dev_name(bfqq->bfqd->queue->backing_dev_info), | |
4982 | + ioprio_class); | |
4981 | 4983 | /* fall through */ |
4982 | 4984 | case IOPRIO_CLASS_NONE: |
4983 | 4985 | /* |
@@ -496,7 +496,7 @@ const char *blkg_dev_name(struct blkcg_gq *blkg) | ||
496 | 496 | { |
497 | 497 | /* some drivers (floppy) instantiate a queue w/o disk registered */ |
498 | 498 | if (blkg->q->backing_dev_info->dev) |
499 | - return dev_name(blkg->q->backing_dev_info->dev); | |
499 | + return bdi_dev_name(blkg->q->backing_dev_info); | |
500 | 500 | return NULL; |
501 | 501 | } |
502 | 502 |
@@ -466,7 +466,7 @@ struct ioc_gq { | ||
466 | 466 | */ |
467 | 467 | atomic64_t vtime; |
468 | 468 | atomic64_t done_vtime; |
469 | - atomic64_t abs_vdebt; | |
469 | + u64 abs_vdebt; | |
470 | 470 | u64 last_vtime; |
471 | 471 | |
472 | 472 | /* |
@@ -1142,7 +1142,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) | ||
1142 | 1142 | struct iocg_wake_ctx ctx = { .iocg = iocg }; |
1143 | 1143 | u64 margin_ns = (u64)(ioc->period_us * |
1144 | 1144 | WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC; |
1145 | - u64 abs_vdebt, vdebt, vshortage, expires, oexpires; | |
1145 | + u64 vdebt, vshortage, expires, oexpires; | |
1146 | 1146 | s64 vbudget; |
1147 | 1147 | u32 hw_inuse; |
1148 | 1148 |
@@ -1152,18 +1152,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) | ||
1152 | 1152 | vbudget = now->vnow - atomic64_read(&iocg->vtime); |
1153 | 1153 | |
1154 | 1154 | /* pay off debt */ |
1155 | - abs_vdebt = atomic64_read(&iocg->abs_vdebt); | |
1156 | - vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse); | |
1155 | + vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); | |
1157 | 1156 | if (vdebt && vbudget > 0) { |
1158 | 1157 | u64 delta = min_t(u64, vbudget, vdebt); |
1159 | 1158 | u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse), |
1160 | - abs_vdebt); | |
1159 | + iocg->abs_vdebt); | |
1161 | 1160 | |
1162 | 1161 | atomic64_add(delta, &iocg->vtime); |
1163 | 1162 | atomic64_add(delta, &iocg->done_vtime); |
1164 | - atomic64_sub(abs_delta, &iocg->abs_vdebt); | |
1165 | - if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0)) | |
1166 | - atomic64_set(&iocg->abs_vdebt, 0); | |
1163 | + iocg->abs_vdebt -= abs_delta; | |
1167 | 1164 | } |
1168 | 1165 | |
1169 | 1166 | /* |
@@ -1219,12 +1216,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) | ||
1219 | 1216 | u64 expires, oexpires; |
1220 | 1217 | u32 hw_inuse; |
1221 | 1218 | |
1219 | + lockdep_assert_held(&iocg->waitq.lock); | |
1220 | + | |
1222 | 1221 | /* debt-adjust vtime */ |
1223 | 1222 | current_hweight(iocg, NULL, &hw_inuse); |
1224 | - vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse); | |
1223 | + vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); | |
1225 | 1224 | |
1226 | - /* clear or maintain depending on the overage */ | |
1227 | - if (time_before_eq64(vtime, now->vnow)) { | |
1225 | + /* | |
1226 | + * Clear or maintain depending on the overage. Non-zero vdebt is what | |
1227 | + * guarantees that @iocg is online and future iocg_kick_delay() will | |
1228 | + * clear use_delay. Don't leave it on when there's no vdebt. | |
1229 | + */ | |
1230 | + if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { | |
1228 | 1231 | blkcg_clear_delay(blkg); |
1229 | 1232 | return false; |
1230 | 1233 | } |
@@ -1258,9 +1261,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) | ||
1258 | 1261 | { |
1259 | 1262 | struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); |
1260 | 1263 | struct ioc_now now; |
1264 | + unsigned long flags; | |
1261 | 1265 | |
1266 | + spin_lock_irqsave(&iocg->waitq.lock, flags); | |
1262 | 1267 | ioc_now(iocg->ioc, &now); |
1263 | 1268 | iocg_kick_delay(iocg, &now, 0); |
1269 | + spin_unlock_irqrestore(&iocg->waitq.lock, flags); | |
1264 | 1270 | |
1265 | 1271 | return HRTIMER_NORESTART; |
1266 | 1272 | } |
@@ -1368,14 +1374,13 @@ static void ioc_timer_fn(struct timer_list *timer) | ||
1368 | 1374 | * should have woken up in the last period and expire idle iocgs. |
1369 | 1375 | */ |
1370 | 1376 | list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { |
1371 | - if (!waitqueue_active(&iocg->waitq) && | |
1372 | - !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg)) | |
1377 | + if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt && | |
1378 | + !iocg_is_idle(iocg)) | |
1373 | 1379 | continue; |
1374 | 1380 | |
1375 | 1381 | spin_lock(&iocg->waitq.lock); |
1376 | 1382 | |
1377 | - if (waitqueue_active(&iocg->waitq) || | |
1378 | - atomic64_read(&iocg->abs_vdebt)) { | |
1383 | + if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { | |
1379 | 1384 | /* might be oversleeping vtime / hweight changes, kick */ |
1380 | 1385 | iocg_kick_waitq(iocg, &now); |
1381 | 1386 | iocg_kick_delay(iocg, &now, 0); |
@@ -1718,28 +1723,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) | ||
1718 | 1723 | * tests are racy but the races aren't systemic - we only miss once |
1719 | 1724 | * in a while which is fine. |
1720 | 1725 | */ |
1721 | - if (!waitqueue_active(&iocg->waitq) && | |
1722 | - !atomic64_read(&iocg->abs_vdebt) && | |
1726 | + if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && | |
1723 | 1727 | time_before_eq64(vtime + cost, now.vnow)) { |
1724 | 1728 | iocg_commit_bio(iocg, bio, cost); |
1725 | 1729 | return; |
1726 | 1730 | } |
1727 | 1731 | |
1728 | 1732 | /* |
1729 | - * We're over budget. If @bio has to be issued regardless, | |
1730 | - * remember the abs_cost instead of advancing vtime. | |
1731 | - * iocg_kick_waitq() will pay off the debt before waking more IOs. | |
1733 | + * We activated above but w/o any synchronization. Deactivation is | |
1734 | + * synchronized with waitq.lock and we won't get deactivated as long | |
1735 | + * as we're waiting or has debt, so we're good if we're activated | |
1736 | + * here. In the unlikely case that we aren't, just issue the IO. | |
1737 | + */ | |
1738 | + spin_lock_irq(&iocg->waitq.lock); | |
1739 | + | |
1740 | + if (unlikely(list_empty(&iocg->active_list))) { | |
1741 | + spin_unlock_irq(&iocg->waitq.lock); | |
1742 | + iocg_commit_bio(iocg, bio, cost); | |
1743 | + return; | |
1744 | + } | |
1745 | + | |
1746 | + /* | |
1747 | + * We're over budget. If @bio has to be issued regardless, remember | |
1748 | + * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay | |
1749 | + * off the debt before waking more IOs. | |
1750 | + * | |
1732 | 1751 | * This way, the debt is continuously paid off each period with the |
1733 | - * actual budget available to the cgroup. If we just wound vtime, | |
1734 | - * we would incorrectly use the current hw_inuse for the entire | |
1735 | - * amount which, for example, can lead to the cgroup staying | |
1736 | - * blocked for a long time even with substantially raised hw_inuse. | |
1752 | + * actual budget available to the cgroup. If we just wound vtime, we | |
1753 | + * would incorrectly use the current hw_inuse for the entire amount | |
1754 | + * which, for example, can lead to the cgroup staying blocked for a | |
1755 | + * long time even with substantially raised hw_inuse. | |
1756 | + * | |
1757 | + * An iocg with vdebt should stay online so that the timer can keep | |
1758 | + * deducting its vdebt and [de]activate use_delay mechanism | |
1759 | + * accordingly. We don't want to race against the timer trying to | |
1760 | + * clear them and leave @iocg inactive w/ dangling use_delay heavily | |
1761 | + * penalizing the cgroup and its descendants. | |
1737 | 1762 | */ |
1738 | 1763 | if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { |
1739 | - atomic64_add(abs_cost, &iocg->abs_vdebt); | |
1764 | + iocg->abs_vdebt += abs_cost; | |
1740 | 1765 | if (iocg_kick_delay(iocg, &now, cost)) |
1741 | 1766 | blkcg_schedule_throttle(rqos->q, |
1742 | 1767 | (bio->bi_opf & REQ_SWAP) == REQ_SWAP); |
1768 | + spin_unlock_irq(&iocg->waitq.lock); | |
1743 | 1769 | return; |
1744 | 1770 | } |
1745 | 1771 |
@@ -1756,20 +1782,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) | ||
1756 | 1782 | * All waiters are on iocg->waitq and the wait states are |
1757 | 1783 | * synchronized using waitq.lock. |
1758 | 1784 | */ |
1759 | - spin_lock_irq(&iocg->waitq.lock); | |
1760 | - | |
1761 | - /* | |
1762 | - * We activated above but w/o any synchronization. Deactivation is | |
1763 | - * synchronized with waitq.lock and we won't get deactivated as | |
1764 | - * long as we're waiting, so we're good if we're activated here. | |
1765 | - * In the unlikely case that we are deactivated, just issue the IO. | |
1766 | - */ | |
1767 | - if (unlikely(list_empty(&iocg->active_list))) { | |
1768 | - spin_unlock_irq(&iocg->waitq.lock); | |
1769 | - iocg_commit_bio(iocg, bio, cost); | |
1770 | - return; | |
1771 | - } | |
1772 | - | |
1773 | 1785 | init_waitqueue_func_entry(&wait.wait, iocg_wake_fn); |
1774 | 1786 | wait.wait.private = current; |
1775 | 1787 | wait.bio = bio; |
@@ -1801,6 +1813,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, | ||
1801 | 1813 | struct ioc_now now; |
1802 | 1814 | u32 hw_inuse; |
1803 | 1815 | u64 abs_cost, cost; |
1816 | + unsigned long flags; | |
1804 | 1817 | |
1805 | 1818 | /* bypass if disabled or for root cgroup */ |
1806 | 1819 | if (!ioc->enabled || !iocg->level) |
@@ -1820,15 +1833,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, | ||
1820 | 1833 | iocg->cursor = bio_end; |
1821 | 1834 | |
1822 | 1835 | /* |
1823 | - * Charge if there's enough vtime budget and the existing request | |
1824 | - * has cost assigned. Otherwise, account it as debt. See debt | |
1825 | - * handling in ioc_rqos_throttle() for details. | |
1836 | + * Charge if there's enough vtime budget and the existing request has | |
1837 | + * cost assigned. | |
1826 | 1838 | */ |
1827 | 1839 | if (rq->bio && rq->bio->bi_iocost_cost && |
1828 | - time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) | |
1840 | + time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { | |
1829 | 1841 | iocg_commit_bio(iocg, bio, cost); |
1830 | - else | |
1831 | - atomic64_add(abs_cost, &iocg->abs_vdebt); | |
1842 | + return; | |
1843 | + } | |
1844 | + | |
1845 | + /* | |
1846 | + * Otherwise, account it as debt if @iocg is online, which it should | |
1847 | + * be for the vast majority of cases. See debt handling in | |
1848 | + * ioc_rqos_throttle() for details. | |
1849 | + */ | |
1850 | + spin_lock_irqsave(&iocg->waitq.lock, flags); | |
1851 | + if (likely(!list_empty(&iocg->active_list))) { | |
1852 | + iocg->abs_vdebt += abs_cost; | |
1853 | + iocg_kick_delay(iocg, &now, cost); | |
1854 | + } else { | |
1855 | + iocg_commit_bio(iocg, bio, cost); | |
1856 | + } | |
1857 | + spin_unlock_irqrestore(&iocg->waitq.lock, flags); | |
1832 | 1858 | } |
1833 | 1859 | |
1834 | 1860 | static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) |
@@ -1998,7 +2024,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd) | ||
1998 | 2024 | iocg->ioc = ioc; |
1999 | 2025 | atomic64_set(&iocg->vtime, now.vnow); |
2000 | 2026 | atomic64_set(&iocg->done_vtime, now.vnow); |
2001 | - atomic64_set(&iocg->abs_vdebt, 0); | |
2002 | 2027 | atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); |
2003 | 2028 | INIT_LIST_HEAD(&iocg->active_list); |
2004 | 2029 | iocg->hweight_active = HWEIGHT_WHOLE; |
@@ -1110,7 +1110,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, | ||
1110 | 1110 | * Don't treat an error as fatal, as we potentially already |
1111 | 1111 | * have a NGUID or EUI-64. |
1112 | 1112 | */ |
1113 | - if (status > 0) | |
1113 | + if (status > 0 && !(status & NVME_SC_DNR)) | |
1114 | 1114 | status = 0; |
1115 | 1115 | goto free_data; |
1116 | 1116 | } |
@@ -973,9 +973,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) | ||
973 | 973 | |
974 | 974 | static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) |
975 | 975 | { |
976 | - if (++nvmeq->cq_head == nvmeq->q_depth) { | |
976 | + u16 tmp = nvmeq->cq_head + 1; | |
977 | + | |
978 | + if (tmp == nvmeq->q_depth) { | |
977 | 979 | nvmeq->cq_head = 0; |
978 | 980 | nvmeq->cq_phase ^= 1; |
981 | + } else { | |
982 | + nvmeq->cq_head = tmp; | |
979 | 983 | } |
980 | 984 | } |
981 | 985 |
@@ -271,7 +271,7 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc) | ||
271 | 271 | &congestion_kb_fops); |
272 | 272 | |
273 | 273 | snprintf(name, sizeof(name), "../../bdi/%s", |
274 | - dev_name(fsc->sb->s_bdi->dev)); | |
274 | + bdi_dev_name(fsc->sb->s_bdi)); | |
275 | 275 | fsc->debugfs_bdi = |
276 | 276 | debugfs_create_symlink("bdi", |
277 | 277 | fsc->client->debugfs_dir, |
@@ -164,7 +164,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc) | ||
164 | 164 | goto fail_free; |
165 | 165 | } |
166 | 166 | |
167 | - err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id); | |
167 | + err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id); | |
168 | 168 | if (err) |
169 | 169 | goto fail_free; |
170 | 170 |
@@ -219,6 +219,7 @@ struct backing_dev_info { | ||
219 | 219 | wait_queue_head_t wb_waitq; |
220 | 220 | |
221 | 221 | struct device *dev; |
222 | + char dev_name[64]; | |
222 | 223 | struct device *owner; |
223 | 224 | |
224 | 225 | struct timer_list laptop_mode_wb_timer; |
@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) | ||
505 | 505 | (1 << WB_async_congested)); |
506 | 506 | } |
507 | 507 | |
508 | -extern const char *bdi_unknown_name; | |
509 | - | |
510 | -static inline const char *bdi_dev_name(struct backing_dev_info *bdi) | |
511 | -{ | |
512 | - if (!bdi || !bdi->dev) | |
513 | - return bdi_unknown_name; | |
514 | - return dev_name(bdi->dev); | |
515 | -} | |
508 | +const char *bdi_dev_name(struct backing_dev_info *bdi); | |
516 | 509 | |
517 | 510 | #endif /* _LINUX_BACKING_DEV_H */ |
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat, | ||
33 | 33 | ), |
34 | 34 | |
35 | 35 | TP_fast_assign( |
36 | - strlcpy(__entry->name, dev_name(bdi->dev), | |
36 | + strlcpy(__entry->name, bdi_dev_name(bdi), | |
37 | 37 | ARRAY_SIZE(__entry->name)); |
38 | 38 | __entry->rmean = stat[0].mean; |
39 | 39 | __entry->rmin = stat[0].min; |
@@ -68,7 +68,7 @@ TRACE_EVENT(wbt_lat, | ||
68 | 68 | ), |
69 | 69 | |
70 | 70 | TP_fast_assign( |
71 | - strlcpy(__entry->name, dev_name(bdi->dev), | |
71 | + strlcpy(__entry->name, bdi_dev_name(bdi), | |
72 | 72 | ARRAY_SIZE(__entry->name)); |
73 | 73 | __entry->lat = div_u64(lat, 1000); |
74 | 74 | ), |
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step, | ||
105 | 105 | ), |
106 | 106 | |
107 | 107 | TP_fast_assign( |
108 | - strlcpy(__entry->name, dev_name(bdi->dev), | |
108 | + strlcpy(__entry->name, bdi_dev_name(bdi), | |
109 | 109 | ARRAY_SIZE(__entry->name)); |
110 | 110 | __entry->msg = msg; |
111 | 111 | __entry->step = step; |
@@ -141,7 +141,7 @@ TRACE_EVENT(wbt_timer, | ||
141 | 141 | ), |
142 | 142 | |
143 | 143 | TP_fast_assign( |
144 | - strlcpy(__entry->name, dev_name(bdi->dev), | |
144 | + strlcpy(__entry->name, bdi_dev_name(bdi), | |
145 | 145 | ARRAY_SIZE(__entry->name)); |
146 | 146 | __entry->status = status; |
147 | 147 | __entry->step = step; |
@@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { | ||
21 | 21 | EXPORT_SYMBOL_GPL(noop_backing_dev_info); |
22 | 22 | |
23 | 23 | static struct class *bdi_class; |
24 | -const char *bdi_unknown_name = "(unknown)"; | |
24 | +static const char *bdi_unknown_name = "(unknown)"; | |
25 | 25 | |
26 | 26 | /* |
27 | 27 | * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU |
@@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) | ||
938 | 938 | if (bdi->dev) /* The driver needs to use separate queues per device */ |
939 | 939 | return 0; |
940 | 940 | |
941 | - dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args); | |
941 | + vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); | |
942 | + dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); | |
942 | 943 | if (IS_ERR(dev)) |
943 | 944 | return PTR_ERR(dev); |
944 | 945 |
@@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi) | ||
1043 | 1044 | } |
1044 | 1045 | EXPORT_SYMBOL(bdi_put); |
1045 | 1046 | |
1047 | +const char *bdi_dev_name(struct backing_dev_info *bdi) | |
1048 | +{ | |
1049 | + if (!bdi || !bdi->dev) | |
1050 | + return bdi_unknown_name; | |
1051 | + return bdi->dev_name; | |
1052 | +} | |
1053 | +EXPORT_SYMBOL_GPL(bdi_dev_name); | |
1054 | + | |
1046 | 1055 | static wait_queue_head_t congestion_wqh[2] = { |
1047 | 1056 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), |
1048 | 1057 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) |
@@ -159,7 +159,12 @@ class IocgStat: | ||
159 | 159 | else: |
160 | 160 | self.inflight_pct = 0 |
161 | 161 | |
162 | - self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 | |
162 | + # vdebt used to be an atomic64_t and is now u64, support both | |
163 | + try: | |
164 | + self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 | |
165 | + except: | |
166 | + self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000 | |
167 | + | |
163 | 168 | self.use_delay = blkg.use_delay.counter.value_() |
164 | 169 | self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000 |
165 | 170 |