source: trunk/anuga_core/source/anuga/utilities/cg_solve.py @ 8710

Last change on this file since 8710 was 8690, checked in by steve, 12 years ago

Rolling in Padarn's update to fit_interpolation

File size: 9.6 KB
class VectorShapeError(Exception): pass
class ConvergenceError(Exception): pass
class PreconditionerError(Exception): pass

import numpy as num

import anuga.utilities.log as log
from anuga.utilities.sparse import Sparse, Sparse_CSR

# Setup for C conjugate gradient solver
from anuga.utilities import compile
if compile.can_use_C_extension('cg_ext.c'):
    from cg_ext import cg_solve_c
    from cg_ext import cg_solve_c_precon
    from cg_ext import jacobi_precon_c
else:
    msg = "C implementation of conjugate gradient (cg_ext.c) not available"
    raise Exception(msg)

class Stats:

    def __init__(self):

        self.iter = None
        self.rTr = None
        self.dx = None
        self.rTr0 = None
        self.x = None
        self.x0 = None

    def __str__(self):
        msg = ' iter %.5g rTr %.5g x %.5g dx %.5g rTr0 %.5g x0 %.5g' \
              % (self.iter, self.rTr, self.x, self.dx, self.rTr0, self.x0)
        return msg

# Note Padarn 26/11/12: This function has been modified to accept an
# additional argument 'use_c_cg', which calls a C implementation of the CG
# algorithm instead of the current Python implementation. This has not been
# tested when performing the CG routine on multiple quantities, but should
# work. No stats are currently returned by the C function.
# Note Padarn 26/11/12: Further note that to use the C routine, the matrix
# A must currently be in the Sparse_CSR format implemented in
# anuga.utilities.sparse; this is checked at the top of the function below.
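
# For illustration, a minimal usage sketch with hypothetical values (a small
# symmetric positive definite system; the Sparse -> Sparse_CSR conversion
# mirrors the pattern used for the preconditioner matrix D further down):
#
#     A = Sparse(2, 2)
#     A[0, 0] = 2.0; A[0, 1] = 1.0
#     A[1, 0] = 1.0; A[1, 1] = 2.0
#     b = num.array([4.0, 5.0])
#     x = conjugate_gradient(Sparse_CSR(A), b, use_c_cg=True)
#     # or, keeping the Python implementation and collecting stats:
#     x, stats = conjugate_gradient(Sparse_CSR(A), b, output_stats=True)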
def conjugate_gradient(A, b, x0=None, imax=10000, tol=1.0e-8, atol=1.0e-14,
                       iprint=None, output_stats=False, use_c_cg=False,
                       precon='None'):
    """
    Try to solve the linear equation Ax = b using the
    conjugate gradient method.

    If b is a 2D array, treat its columns as separate right hand side
    vectors and solve for each in turn.
    """

    if use_c_cg:
        msg = ('C implementation of conjugate gradient requires that '
               'matrix A be of type %s' % str(Sparse_CSR))
        assert isinstance(A, Sparse_CSR), msg

    if x0 is None:
        x0 = num.zeros(b.shape, dtype=num.float)
    else:
        x0 = num.array(x0, dtype=num.float)

    b = num.array(b, dtype=num.float)

    err = 0
    stats = None    # remains None when the C routines are used

    # Preconditioner
    # Padarn Note: currently a fairly lazy implementation, needs fixing
    M = None
    if precon == 'Jacobi':

        # jacobi_precon_c fills M with the diagonal of A, which defines the
        # Jacobi preconditioner (cf. D[i, i] = 1/M[i] in the Python branch)
        M = num.zeros(b.shape[0])
        jacobi_precon_c(A, M)
        x0 = b.copy()

        if len(b.shape) != 1:

            for i in range(b.shape[1]):

                if not use_c_cg:
                    x0[:, i], stats = _conjugate_gradient_preconditioned(A, b[:, i], x0[:, i], M,
                                               imax, tol, atol, iprint, Type="Jacobi")
                else:
                    # Copy into a new array to ensure contiguous access
                    xnew = x0[:, i].copy()
                    err = cg_solve_c_precon(A, xnew, b[:, i].copy(), imax, tol, atol, b.shape[1], M)
                    x0[:, i] = xnew
        else:

            if not use_c_cg:
                x0, stats = _conjugate_gradient_preconditioned(A, b, x0, M, imax, tol, atol, iprint, Type="Jacobi")
            else:
                err = cg_solve_c_precon(A, x0, b, imax, tol, atol, 1, M)

    else:

        if len(b.shape) != 1:

            for i in range(b.shape[1]):

                if not use_c_cg:
                    x0[:, i], stats = _conjugate_gradient(A, b[:, i], x0[:, i],
                                               imax, tol, atol, iprint)
                else:
                    # Copy into a new array to ensure contiguous access
                    xnew = x0[:, i].copy()
                    err = cg_solve_c(A, xnew, b[:, i].copy(), imax, tol, atol, b.shape[1])
                    x0[:, i] = xnew
        else:

            if not use_c_cg:
                x0, stats = _conjugate_gradient(A, b, x0, imax, tol, atol, iprint)
            else:
                x0 = b.copy()
                err = cg_solve_c(A, x0, b, imax, tol, atol, 1)

    if err == -1:

        log.warning('max number of iterations attained from c cg')
        msg = 'Conjugate gradient solver did not converge'
        raise ConvergenceError(msg)

    if output_stats:
        return x0, stats
    else:
        return x0


def _conjugate_gradient(A, b, x0,
                        imax=10000, tol=1.0e-8, atol=1.0e-10, iprint=None):
    """
    Try to solve the linear equation Ax = b using the
    conjugate gradient method.

    Input
    A: matrix, or function which applies a matrix, assumed symmetric;
       A can be either dense or sparse, or a function
       (__mul__ just needs to be defined)
    b: right hand side
    x0: initial guess (default the 0 vector)
    imax: max number of iterations
    tol: relative tolerance used for the residual
    atol: absolute tolerance used for the residual

    Output
    x: approximate solution
    """

    stats = Stats()

    b = num.array(b, dtype=num.float)
    if len(b.shape) != 1:
        raise VectorShapeError('input vector should consist of only one column')

    if x0 is None:
        x0 = num.zeros(b.shape, dtype=num.float)
    else:
        x0 = num.array(x0, dtype=num.float)

    stats.x0 = num.linalg.norm(x0)

    if iprint is None or iprint == 0:
        iprint = imax

    dx = 0.0

    i = 1
    x = x0
    r = b - A * x
    d = r
    rTr = num.dot(r, r)
    rTr0 = rTr

    stats.rTr0 = rTr0

    # FIXME: Let the iterations stop if starting with a small residual
    while (i < imax and rTr > tol ** 2 * rTr0 and rTr > atol ** 2):
        q = A * d
        alpha = rTr / num.dot(d, q)
        xold = x
        x = x + alpha * d

        dx = num.linalg.norm(x - xold)

        #if dx < atol :
        #    break

        # Padarn Note 26/11/12: Periodically recomputing r = b - A * x
        # (to control accumulated round-off error) seems unnecessary, and was
        # also implemented incorrectly: the test 'if i % 50:' performed the
        # more expensive recomputation on 49 out of every 50 iterations.
        # It should either be removed completely or changed to
        # 'if i % 50 == 0' (or equivalent); it is disabled for now.
        if False:
            r = b - A * x
        else:
            r = r - alpha * q
        rTrOld = rTr
        rTr = num.dot(r, r)
        bt = rTr / rTrOld

        d = r + bt * d
        i = i + 1

        if i % iprint == 0:
            log.info('i = %g rTr = %15.8e dx = %15.8e' % (i, rTr, dx))

        if i == imax:
            log.warning('max number of iterations attained')
            msg = 'Conjugate gradient solver did not converge: rTr==%20.15e' % rTr
            raise ConvergenceError(msg)

    stats.x = num.linalg.norm(x)
    stats.iter = i
    stats.rTr = rTr
    stats.dx = dx

    return x, stats


def _conjugate_gradient_preconditioned(A, b, x0, M,
                        imax=10000, tol=1.0e-8, atol=1.0e-10, iprint=None,
                        Type='None'):
    """
    Try to solve the linear equation Ax = b using the
    preconditioned conjugate gradient method.

    Input
    A: matrix, or function which applies a matrix, assumed symmetric;
       A can be either dense or sparse, or a function
       (__mul__ just needs to be defined)
    b: right hand side
    x0: initial guess (default the 0 vector)
    M: preconditioner data; for the Jacobi preconditioner this holds the
       diagonal of A
    imax: max number of iterations
    tol: relative tolerance used for the residual
    atol: absolute tolerance used for the residual

    Output
    x: approximate solution
    """

    # Padarn note: This is temporary while the Jacobi preconditioner is the
    # only one available.
    D = []
    if Type != 'Jacobi':
        log.warning('Only the Jacobi preconditioner is implemented in cg_solve python')
        msg = 'Only the Jacobi preconditioner is implemented in cg_solve python'
        raise PreconditionerError(msg)
    else:
        D = Sparse(A.M, A.M)
        for i in range(A.M):
            D[i, i] = 1.0 / M[i]
        D = Sparse_CSR(D)

    stats = Stats()

    b = num.array(b, dtype=num.float)
    if len(b.shape) != 1:
        raise VectorShapeError('input vector should consist of only one column')

    if x0 is None:
        x0 = num.zeros(b.shape, dtype=num.float)
    else:
        x0 = num.array(x0, dtype=num.float)

    stats.x0 = num.linalg.norm(x0)

    if iprint is None or iprint == 0:
        iprint = imax

    dx = 0.0

    i = 1
    x = x0
    r = b - A * x
    z = D * r
    d = z    # initial search direction is the preconditioned residual
    rTr = num.dot(r, z)
    rTr0 = rTr

    stats.rTr0 = rTr0

    # FIXME: Let the iterations stop if starting with a small residual
    while (i < imax and rTr > tol ** 2 * rTr0 and rTr > atol ** 2):
        q = A * d
        alpha = rTr / num.dot(d, q)
        xold = x
        x = x + alpha * d

        dx = num.linalg.norm(x - xold)

        #if dx < atol :
        #    break

        # Padarn Note 26/11/12: Periodically recomputing r = b - A * x
        # (to control accumulated round-off error) seems unnecessary, and was
        # also implemented incorrectly: the test 'if i % 50:' performed the
        # more expensive recomputation on 49 out of every 50 iterations.
        # It should either be removed completely or changed to
        # 'if i % 50 == 0' (or equivalent); it is disabled for now.
        if False:
            r = b - A * x
        else:
            r = r - alpha * q
        rTrOld = rTr
        z = D * r
        rTr = num.dot(r, z)
        bt = rTr / rTrOld

        d = z + bt * d
        i = i + 1
        if i % iprint == 0:
            log.info('i = %g rTr = %15.8e dx = %15.8e' % (i, rTr, dx))

        if i == imax:
            log.warning('max number of iterations attained')
            msg = 'Conjugate gradient solver did not converge: rTr==%20.15e' % rTr
            raise ConvergenceError(msg)

    stats.x = num.linalg.norm(x)
    stats.iter = i
    stats.rTr = rTr
    stats.dx = dx

    return x, stats
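

# A minimal smoke-test sketch with hypothetical values (a small symmetric
# positive definite system whose exact solution is [1.0, 2.0]); the C
# extension imported at the top is assumed to be available:
if __name__ == '__main__':
    A = Sparse(2, 2)
    A[0, 0] = 2.0; A[0, 1] = 1.0
    A[1, 0] = 1.0; A[1, 1] = 2.0
    b = num.array([4.0, 5.0])

    # Plain CG via the Python implementation
    print conjugate_gradient(Sparse_CSR(A), b)

    # Jacobi-preconditioned CG on the same system
    print conjugate_gradient(Sparse_CSR(A), b, precon='Jacobi')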